diff --git "a/118.jsonl" "b/118.jsonl"
new file mode 100644
--- /dev/null
+++ "b/118.jsonl"
@@ -0,0 +1,214 @@
+{"seq_id": "19203175246", "text": "from sklearn.model_selection import train_test_split, StratifiedShuffleSplit, StratifiedKFold, LeavePOut\nfrom setup.model import SimpleNet, test_model, train_model\nfrom setup.MNISTImageDataset import MNISTImageDataset\nfrom torch.utils.data import DataLoader\nimport numpy as np\n\ndef monte_carlo_cross_validation(data, targets):\n\n # defines the batch sizes and shuffles the data\n train_params = {'batch_size': 16,\n 'shuffle': True,\n 'num_workers': 1}\n\n # Monte Carlo\n # Set up seeds and array to store accuracies\n seed_list = [42, 151, 297, 333, 406]\n acc_array = []\n\n for seed in seed_list:\n # split your data into train, validate and test sets\n X_temp, X_test, Y_temp, Y_test = train_test_split(data, targets, test_size=0.3, stratify=targets,\n random_state=seed)\n X_train, X_validate, Y_train, Y_validate = train_test_split(X_temp, Y_temp, test_size=20 / 70, stratify=Y_temp,\n random_state=seed)\n\n training_set = MNISTImageDataset(X_train, Y_train)\n valid_set = MNISTImageDataset(X_validate, Y_validate)\n test_set = MNISTImageDataset(X_test, Y_test)\n\n training_loader = DataLoader(training_set, **train_params)\n valid_loader = DataLoader(valid_set, **train_params)\n test_loader = DataLoader(test_set, **train_params)\n\n best_model, _ = train_model(n_epochs=10, train_loader=training_loader, valid_loader=valid_loader)\n acc = test_model(loader=test_loader, test_model=best_model)\n acc_array.append(acc)\n\n print(f\"Average of all seeds: = {np.array(np.mean(acc_array))}\")\n print(f\"Standard deviation of all seeds: = {np.array(np.std(acc_array))}\")", "repo_name": "zhuemann/Cross-Validation-Guide", "sub_path": "monte_carlo_cross_validation.py", "file_name": "monte_carlo_cross_validation.py", "file_ext": "py", "file_size_in_byte": 1816, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 5, "dataset": "github-code", "pt": "85", "api": [{"api_name": "sklearn.model_selection.train_test_split", "line_number": 21, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 23, "usage_type": "call"}, {"api_name": "setup.MNISTImageDataset.MNISTImageDataset", "line_number": 26, "usage_type": "call"}, {"api_name": "setup.MNISTImageDataset.MNISTImageDataset", "line_number": 27, "usage_type": "call"}, {"api_name": "setup.MNISTImageDataset.MNISTImageDataset", "line_number": 28, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 30, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 31, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 32, "usage_type": "call"}, {"api_name": "setup.model.train_model", "line_number": 34, "usage_type": "call"}, {"api_name": "setup.model.test_model", "line_number": 35, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 38, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 38, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 39, "usage_type": "call"}, {"api_name": "numpy.std", "line_number": 39, "usage_type": "call"}]}
+{"seq_id": "37186239505", "text": "from http import HTTPStatus\n\nimport pytest\n\nfrom tests.test_services.testdata import search_responses\n\npytestmark = [pytest.mark.asyncio, pytest.mark.integrational]\n\n\nclass TestSearch:\n @pytest.mark.parametrize(\n 'url,status_code,response_json',\n [\n (\n \"/api/v1/films/search/?query=Star&page_number=1&page_size=2\",\n HTTPStatus.OK,\n search_responses.SEARCH_FILMS_SUCCESS,\n ),\n (\n \"/api/v1/films/search/?query=Star&page_number=-1&page_size=2\",\n HTTPStatus.UNPROCESSABLE_ENTITY,\n search_responses.SEARCH_FILMS_UNPROCESSABLE_PAGE_NUMBER,\n ),\n (\n \"/api/v1/films/search/?query=jkngjrkt&page_number=1&page_size=2\",\n HTTPStatus.NOT_FOUND,\n search_responses.SEARCH_FILMS_NOT_FOUND,\n ),\n (\n \"/api/v1/persons/search?query=Carrie&page_size=2&page_number=1\",\n HTTPStatus.OK,\n search_responses.SEARCH_PERSONS_SUCCESS,\n ),\n (\n \"/api/v1/persons/search?query=Carrie&page_size=-2&page_number=1\",\n HTTPStatus.UNPROCESSABLE_ENTITY,\n search_responses.SEARCH_PERSONS_UNPROCESSABLE_PAGE_SIZE,\n ),\n (\n \"/api/v1/persons/search?query=ASJGNRIJGNRKJEG&page_size=2&page_number=1\",\n HTTPStatus.NOT_FOUND,\n search_responses.SEARCH_PERSONS_NOT_FOUND,\n ),\n ],\n )\n async def test_search(self, client, url, status_code, response_json):\n response = await client.get(url)\n assert response.status_code == status_code, response.text\n assert response.json() == response_json\n", "repo_name": "stranded-in-python/movix-api", "sub_path": "src/tests/test_services/test_search.py", "file_name": "test_search.py", "file_ext": "py", "file_size_in_byte": 1788, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "85", "api": [{"api_name": "pytest.mark", "line_number": 7, "usage_type": "attribute"}, {"api_name": "pytest.mark.parametrize", "line_number": 11, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 11, "usage_type": "attribute"}, {"api_name": "http.HTTPStatus.OK", "line_number": 16, "usage_type": "attribute"}, {"api_name": "http.HTTPStatus", "line_number": 16, "usage_type": "name"}, {"api_name": "tests.test_services.testdata.search_responses.SEARCH_FILMS_SUCCESS", "line_number": 17, "usage_type": "attribute"}, {"api_name": "tests.test_services.testdata.search_responses", "line_number": 17, "usage_type": "name"}, {"api_name": "http.HTTPStatus.UNPROCESSABLE_ENTITY", "line_number": 21, "usage_type": "attribute"}, {"api_name": "http.HTTPStatus", "line_number": 21, "usage_type": "name"}, {"api_name": "tests.test_services.testdata.search_responses.SEARCH_FILMS_UNPROCESSABLE_PAGE_NUMBER", "line_number": 22, "usage_type": "attribute"}, {"api_name": "tests.test_services.testdata.search_responses", "line_number": 22, "usage_type": "name"}, {"api_name": "http.HTTPStatus.NOT_FOUND", "line_number": 26, "usage_type": "attribute"}, {"api_name": "http.HTTPStatus", "line_number": 26, "usage_type": "name"}, {"api_name": "tests.test_services.testdata.search_responses.SEARCH_FILMS_NOT_FOUND", "line_number": 27, "usage_type": "attribute"}, {"api_name": "tests.test_services.testdata.search_responses", "line_number": 27, "usage_type": "name"}, {"api_name": "http.HTTPStatus.OK", "line_number": 31, "usage_type": "attribute"}, {"api_name": "http.HTTPStatus", "line_number": 31, "usage_type": "name"}, {"api_name": "tests.test_services.testdata.search_responses.SEARCH_PERSONS_SUCCESS", "line_number": 32, "usage_type": "attribute"}, {"api_name": "tests.test_services.testdata.search_responses", "line_number": 32, 
"usage_type": "name"}, {"api_name": "http.HTTPStatus.UNPROCESSABLE_ENTITY", "line_number": 36, "usage_type": "attribute"}, {"api_name": "http.HTTPStatus", "line_number": 36, "usage_type": "name"}, {"api_name": "tests.test_services.testdata.search_responses.SEARCH_PERSONS_UNPROCESSABLE_PAGE_SIZE", "line_number": 37, "usage_type": "attribute"}, {"api_name": "tests.test_services.testdata.search_responses", "line_number": 37, "usage_type": "name"}, {"api_name": "http.HTTPStatus.NOT_FOUND", "line_number": 41, "usage_type": "attribute"}, {"api_name": "http.HTTPStatus", "line_number": 41, "usage_type": "name"}, {"api_name": "tests.test_services.testdata.search_responses.SEARCH_PERSONS_NOT_FOUND", "line_number": 42, "usage_type": "attribute"}, {"api_name": "tests.test_services.testdata.search_responses", "line_number": 42, "usage_type": "name"}]}
+{"seq_id": "73505808279", "text": "import datetime\r\nimport calendar\r\nfrom datetime import timedelta\r\n\r\n#set first weekday\r\nc = calendar.Calendar(firstweekday=calendar.SUNDAY)\r\n\r\n####################\r\n# Set this stuff #\r\n####################\r\n\r\n#set the year\r\nyear = 2020\r\n#offset in days from patch Tuesday. 3 = Friday, 4 = Saturday, 5 = Sunday\r\noffsetDays = 4\r\n#set the offset in weeks. W1 = 0, W2 = 1, W3 = 2, W4 = 3\r\noffsetWeeks = 1\r\n\r\n\r\n#turn it into days because we can only do math in days\r\noffsetWeeks = offsetWeeks * 7\r\n\r\n#loop through all months\r\nmonth = 1\r\nwhile month <= 12:\r\n monthcal = c.monthdatescalendar(year,month)\r\n #get second Tuesday\r\n secondTues = [day for week in monthcal for day in week if day.weekday() == calendar.TUESDAY and day.month == month][1]\r\n #add offset\r\n offsetDate = secondTues + datetime.timedelta(days=offsetDays) + datetime.timedelta(days=offsetWeeks)\r\n print(offsetDate)\r\n month += 1\r\n", "repo_name": "kr4spy/PatchTuesday", "sub_path": "patchtuesday.py", "file_name": "patchtuesday.py", "file_ext": "py", "file_size_in_byte": 935, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "85", "api": [{"api_name": "calendar.Calendar", "line_number": 6, "usage_type": "call"}, {"api_name": "calendar.SUNDAY", "line_number": 6, "usage_type": "attribute"}, {"api_name": "calendar.TUESDAY", "line_number": 28, "usage_type": "attribute"}, {"api_name": "datetime.timedelta", "line_number": 30, "usage_type": "call"}]}
+{"seq_id": "73465247639", "text": "import pprint\r\n\r\nfrom pymongo import MongoClient\r\nclient = MongoClient('mongodb://olk11:7LwEV4mUxIqd@nosql.dcs.aber.ac.uk/olk11')\r\n\r\ndb = client.olk11\r\n\r\noutputfile = open(\"mariners_ships\", encoding=\"utf-8\", mode=\"w\")\r\n\r\ncursor = db.shipsTest.aggregate([\r\n {\"$unwind\": \"$mariners\"},\r\n {\"$group\": {\"_id\": \"$mariners.name\", \r\n \"ships\": {\"$push\": {\"vessel name\": \"$vessel name\",\r\n \"capacity\": \"$mariners.this_ship_capacity\",\r\n \"Date_birth\": \"$mariners.year_of_birth\",\r\n \"leave_date\": \"$mariners.this_ship_leaving_date\",\r\n \"age\": \"$mariners.age\",\r\n \"join_date\": \"$mariners.this_ship_joining_date\"}}}}\r\n])\r\n\r\nfor doc in cursor:\r\n pprint.pprint(doc, outputfile)", "repo_name": "oknappett/data_mining", "sub_path": "prac3/aggregation3.py", "file_name": "aggregation3.py", "file_ext": "py", "file_size_in_byte": 796, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "85", "api": [{"api_name": "pymongo.MongoClient", "line_number": 4, "usage_type": "call"}, {"api_name": "pprint.pprint", "line_number": 22, "usage_type": "call"}]}
+{"seq_id": "34289964835", "text": "from cmath import cos\nfrom bert_serving.client import BertClient\nimport speech_recognition as sr\nimport numpy as np\n\n\n# Working with audio files\nr = sr.Recognizer()\nbc = BertClient(ip='139.224.100.23')# ip中是部署了bert模型的服务器地址\n\nstc = [\n \"为我解读一下我这个月的碳信用\",\n \"我现在有多少碳币\",\n \"我的碳信用分数是多少\",\n]\nvec = []\nvec = bc.encode(stc)\nnp.save('./bert_vec.npy',vec)\ninput_vec = []\nvec = np.load('./bert_vec.npy')\n\n\n\nprint(\"您可以向我查询碳币、碳信用等等...\"+'\\n'+'请说话:')\nmicrophone = sr.Microphone()\nwith microphone as source:\n r.adjust_for_ambient_noise(source)\n audio = r.listen(source)\ntry:\n print('录音结束')\n # sentence = r.recognize_sphinx(audio)\n input_sentence = r.recognize_google(audio,language=\"cmn-Hans-CN\") #简体中文\n print('识别结束')\n # 计算用户说的句子的bert向量\n input_vec = bc.encode([input_sentence])\n print(input_sentence)\nexcept:\n print(\"无法识别出句子,请重试。\")\n\n# 将输入句子的向量与预设句子的向量一一求出余弦值,与余弦值最大的匹配成功\ncos_input = []\nfor each in vec:\n each = each.reshape(768,1)\n res = input_vec.dot(each) / (np.linalg.norm(input_vec) * np.linalg.norm(each))\n res = (res[0][0])\n cos_input.append(res)\nprint(cos_input)\nindex = cos_input.index(max(cos_input))\nprint('检测到输入应为预设库中的第'+str(index+1)+'条,“',stc[index]+'”')\n#cos_input = a.dot(b) / (np.linalg.norm(a) * np.linalg.norm(b))", "repo_name": "florrietan/citicup", "sub_path": "citicup/thm/voice.py", "file_name": "voice.py", "file_ext": "py", "file_size_in_byte": 1562, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "85", "api": [{"api_name": "speech_recognition.Recognizer", "line_number": 8, "usage_type": "call"}, {"api_name": "bert_serving.client.BertClient", "line_number": 9, "usage_type": "call"}, {"api_name": "numpy.save", "line_number": 18, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 20, "usage_type": "call"}, {"api_name": "speech_recognition.Microphone", "line_number": 25, "usage_type": "call"}, {"api_name": "numpy.linalg.norm", "line_number": 44, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 44, "usage_type": "attribute"}]}
+{"seq_id": "11178176088", "text": "import sqlite3\nimport traceback\n\n\ndef dbConnection():\n try:\n conn = sqlite3.connect('ZscalerClient-versions.db')\n print(\"Opened database successfully\")\n return conn\n except Exception:\n print(\"Error occured\")\n\ndef createBaseTable(conn):\n try:\n conn.execute('''CREATE TABLE ZSCALER_VERSIONS\n (VERSION TEXT NOT NULL,\n TYPE TEXT NOT NULL,\n CATEGORY TEXT NOT NULL);''')\n print(\"Table created successfully\")\n except Exception:\n print(\"Table already exists\")\n\ndef checkVersion(conn,version,type):\n try:\n cur = conn.cursor()\n cur.execute(\"SELECT count(*) from ZSCALER_VERSIONS where VERSION = ? and TYPE = ?\",(version,type,))\n return cur.fetchone()\n except Exception:\n print(\"Problem with checkVersion\")\n \n\ndef recordNewVersion(conn, version, type, category):\n try:\n cur = conn.cursor()\n cur.execute(\"INSERT INTO ZSCALER_VERSIONS(VERSION,TYPE,CATEGORY)values(?,?,?)\",(version,type,category))\n conn.commit()\n return\n except Exception:\n print(\"Error Occured in recording new version\")\n traceback.print_exc()\n\ndef closeConnection(conn):\n try:\n conn.close()\n except Exception:\n print(\"Error Occured in closing connection\")", "repo_name": "priyankasallaram/pyprogs", "sub_path": "db.py", "file_name": "db.py", "file_ext": "py", "file_size_in_byte": 1321, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "85", "api": [{"api_name": "sqlite3.connect", "line_number": 7, "usage_type": "call"}, {"api_name": "traceback.print_exc", "line_number": 40, "usage_type": "call"}]}
+{"seq_id": "23943627585", "text": "# 336 HashMap | O(N * W ^ 2) where W is the length of the longest word\nfrom typing import List\n\n\ndef maximumScore(nums: List[int], multipliers: List[int]) -> int:\n store = [0] * (len(multipliers) + 1)\n n = len(nums)\n max_score = [0] * len(multipliers)\n for i in reversed(range(len(multipliers))):\n for j in range(i + 1):\n left = store[j + 1] + nums[j] * multipliers[i]\n right = store[j] + nums[n - i + j - 1] * multipliers[i]\n max_score[j] = max(left, right)\n store = max_score\n\n return store[0]\n", "repo_name": "soumyendra98/Data-Structures-and-Algorithms", "sub_path": "Arrays/LeetCode/Palindrome Pairs.py", "file_name": "Palindrome Pairs.py", "file_ext": "py", "file_size_in_byte": 558, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "85", "api": [{"api_name": "typing.List", "line_number": 5, "usage_type": "name"}]}
+{"seq_id": "12409015352", "text": "import requests as re\r\nfrom datetime import date,datetime,timedelta \r\nimport argparse\r\nimport json\r\n\r\n# create parser\r\nparser = argparse.ArgumentParser()\r\n \r\n# add arguments to the parser\r\nparser.add_argument(\"username\")\r\nparser.add_argument(\"reponame\")\r\n\r\n# parse the arguments\r\nargs = parser.parse_args()\r\n\r\nuser = args.username\r\nrepo = args.reponame\r\n\r\nurl_branch = 'https://api.github.com/repos/{}/{}/branches'\r\ndata = re.get(url_branch.format(user,repo)).json() #'Rishabh1803','100DaysOfCode'\r\n\r\n# fetching the latest commited branch\r\nurl_latest_branch = data[0]['commit']['url']\r\ndata = re.get(url_latest_branch).json()\r\n\r\n# fetching latest commit date\r\nlatest_commit_date = data['commit']['committer']['date'][:10]\r\nlatest_commit_date_list = [int(i) for i in latest_commit_date.split('-')]\r\nprint(latest_commit_date_list[0], latest_commit_date_list[1], latest_commit_date_list[2])", "repo_name": "smriti111/100DaysOfCode", "sub_path": "Day1/github_other_branch.py", "file_name": "github_other_branch.py", "file_ext": "py", "file_size_in_byte": 887, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "85", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 7, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 20, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 24, "usage_type": "call"}]}
+{"seq_id": "27178449491", "text": "import pathlib\nimport sys\n\nimport utils\n\nfrom tensorflow.keras.layers import Conv2D, MaxPooling2D, Dense, Flatten, Dropout\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.utils import to_categorical\n\ndef train(data_gen, data_dir, epochs):\n train_iter = data_gen.flow_from_directory(\n data_dir,\n **utils.load_options,\n subset=\"training\"\n )\n\n validation_iter = data_gen.flow_from_directory(\n data_dir,\n **utils.load_options,\n subset=\"validation\"\n )\n\n model = Sequential([\n Conv2D(256, 5, activation='relu', padding='same', input_shape=utils.model_input_shape),\n MaxPooling2D(pool_size=2),\n Conv2D(128, 5, activation='relu', padding='same'),\n MaxPooling2D(pool_size=2),\n Dropout(0.2),\n Conv2D(64, 3, activation='relu', padding='same'),\n MaxPooling2D(pool_size=2),\n Dropout(0.2),\n Flatten(),\n Dense(128, activation='relu'),\n Dense(4, activation='softmax')\n ])\n\n model.compile(\n 'adam',\n loss='categorical_crossentropy',\n metrics=['accuracy'],\n )\n\n model.fit(\n train_iter,\n epochs=epochs,\n validation_data=validation_iter,\n )\n\n return model\n\ndef main(data, epochs):\n data_gen = utils.prepare_data()\n\n data_folder = pathlib.Path(data)\n model_folder = pathlib.Path(data).parent / (data_folder.name + \"_model\")\n\n model = train(data_gen, str(data_folder), epochs)\n model.save(str(model_folder))\n\nif __name__ == \"__main__\":\n if len(sys.argv) < 2:\n print(\"Please supply data directory as argument\")\n exit(-1)\n\n main(sys.argv[1], 10)\n", "repo_name": "brendanburkhart/gesture-detection", "sub_path": "train.py", "file_name": "train.py", "file_ext": "py", "file_size_in_byte": 1672, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "85", "api": [{"api_name": "utils.load_options", "line_number": 13, "usage_type": "attribute"}, {"api_name": "utils.load_options", "line_number": 19, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.models.Sequential", "line_number": 23, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Conv2D", "line_number": 24, "usage_type": "call"}, {"api_name": "utils.model_input_shape", "line_number": 24, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.layers.MaxPooling2D", "line_number": 25, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Conv2D", "line_number": 26, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.MaxPooling2D", "line_number": 27, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Dropout", "line_number": 28, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Conv2D", "line_number": 29, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.MaxPooling2D", "line_number": 30, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Dropout", "line_number": 31, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Flatten", "line_number": 32, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 33, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 34, "usage_type": "call"}, {"api_name": "utils.prepare_data", "line_number": 52, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 54, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 55, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 61, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 65, "usage_type": "attribute"}]}
+{"seq_id": "15438963931", "text": "import json\n\nfrom django import forms\nfrom django.core.serializers.json import DjangoJSONEncoder\nfrom django.utils.translation import gettext as _\n\nfrom raw_materials.models import RawMaterial\nfrom sales.models import MaterialSaleRelation\nfrom datetime import date, datetime\n\nclass SelectPredictionForm(forms.Form):\n\n date = forms.DateField( # datetime.date\n label=_(\"Fecha de prediccion\"),\n required=True\n )\n\n raw_materials = forms.ModelMultipleChoiceField(\n label=_(\"Escoge las materias primas a predecir\"),\n queryset=None,\n required=True\n )\n\n def __init__(self, *args, **kwargs):\n company = kwargs.pop('company')\n self.request = kwargs.pop('request')\n super(SelectPredictionForm, self).__init__(*args, **kwargs)\n\n self.fields['raw_materials'].queryset = RawMaterial.objects.filter(\n company=company.pk,\n )\n\n def clean(self):\n errors = []\n data = super(SelectPredictionForm, self).clean()\n\n try:\n date_string_error = data['date'].strftime(\"%m/%d/%Y\")\n date_string = data['date'].strftime(\"%Y-%m-%d\")\n except (Exception, KeyError) as ex:\n raise forms.ValidationError(\n _('Se introdujo una fecha no valida.'),\n code='invalid',\n )\n\n\n if data['date'] < datetime.now().date():\n errors.append(forms.ValidationError(\n _('Error de fecha se escogio la fecha %(value)s, la cual es del pasado.'),\n code='invalid',\n params={\n 'value': date_string_error,\n },\n ))\n\n for material in data['raw_materials']:\n sales_relation_count = MaterialSaleRelation.objects.filter(\n raw_material=material.pk\n ).count()\n\n if sales_relation_count == 0:\n errors.append(forms.ValidationError(\n _('La materia prima %(value)s no ha formado parte de ninguna compra.'),\n code='invalid',\n params={\n 'value': material.name,\n },\n ))\n elif sales_relation_count == 1:\n errors.append(forms.ValidationError(\n _('La materia prima %(value)s se ha comprado una unica vez, siendo no apta para predicciones.'),\n code='invalid',\n params={\n 'value': material.name,\n },\n ))\n\n if errors:\n raise forms.ValidationError(errors)\n\n self.request.session['prediction_date'] = date_string\n self.request.session['prediction_raw_materials'] = json.dumps(\n list(\n data['raw_materials'].values(\n 'name',\n 'pk',\n )\n ),\n cls=DjangoJSONEncoder\n )\n self.request.session['prediction_to_save'] = True\n\n return data\n\n class Meta:\n pass\n", "repo_name": "LuisReyes98/estimator", "sub_path": "estimator/predictions/forms.py", "file_name": "forms.py", "file_ext": "py", "file_size_in_byte": 3084, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "85", "api": [{"api_name": "django.forms.Form", "line_number": 11, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 11, "usage_type": "name"}, {"api_name": "datetime.date", "line_number": 13, "usage_type": "name"}, {"api_name": "django.forms.DateField", "line_number": 13, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 13, "usage_type": "name"}, {"api_name": "django.utils.translation.gettext", "line_number": 14, "usage_type": "call"}, {"api_name": "raw_materials.models", "line_number": 18, "usage_type": "name"}, {"api_name": "django.forms.ModelMultipleChoiceField", "line_number": 18, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 18, "usage_type": "name"}, {"api_name": "django.utils.translation.gettext", "line_number": 19, "usage_type": "call"}, {"api_name": "raw_materials.models.RawMaterial.objects.filter", 
"line_number": 29, "usage_type": "call"}, {"api_name": "raw_materials.models.RawMaterial.objects", "line_number": 29, "usage_type": "attribute"}, {"api_name": "raw_materials.models.RawMaterial", "line_number": 29, "usage_type": "name"}, {"api_name": "django.forms.ValidationError", "line_number": 41, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 41, "usage_type": "name"}, {"api_name": "django.utils.translation.gettext", "line_number": 42, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 47, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 47, "usage_type": "name"}, {"api_name": "django.forms.ValidationError", "line_number": 48, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 48, "usage_type": "name"}, {"api_name": "django.utils.translation.gettext", "line_number": 49, "usage_type": "call"}, {"api_name": "sales.models.MaterialSaleRelation.objects.filter", "line_number": 57, "usage_type": "call"}, {"api_name": "sales.models.MaterialSaleRelation.objects", "line_number": 57, "usage_type": "attribute"}, {"api_name": "sales.models.MaterialSaleRelation", "line_number": 57, "usage_type": "name"}, {"api_name": "django.forms.ValidationError", "line_number": 62, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 62, "usage_type": "name"}, {"api_name": "django.utils.translation.gettext", "line_number": 63, "usage_type": "call"}, {"api_name": "django.forms.ValidationError", "line_number": 70, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 70, "usage_type": "name"}, {"api_name": "django.utils.translation.gettext", "line_number": 71, "usage_type": "call"}, {"api_name": "django.forms.ValidationError", "line_number": 79, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 79, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 82, "usage_type": "call"}, {"api_name": "django.core.serializers.json.DjangoJSONEncoder", "line_number": 89, "usage_type": "name"}]}
+{"seq_id": "42294654417", "text": "from unittest.mock import MagicMock\nfrom uuid import UUID\n\nimport pytest\nfrom fastapi import HTTPException\n\nfrom coffee_backend.exceptions.exceptions import ObjectNotFoundError\nfrom coffee_backend.services.coffee_image import ImageService\nfrom tests.conftest import DummyImages\n\n\n@pytest.mark.asyncio\nasync def test_coffee_image_service_get_coffee_image(\n dummy_coffee_images: DummyImages,\n) -> None:\n \"\"\"Test the CoffeeImagesService get_coffee_image method for retrieving a\n coffee image.\n\n Args:\n dummy_coffee_images (DummyImages): An instance providing dummy coffee\n image data.\n\n \"\"\"\n coffee_image_crud = MagicMock()\n coffee_image_crud.read.return_value = (\n dummy_coffee_images.image_1_bytes,\n \"jpg\",\n )\n\n coffe_uuid = UUID(\"123e4567-e19b-12d3-a456-426655440000\")\n\n test_coffee_service = ImageService(coffee_images_crud=coffee_image_crud)\n\n result = test_coffee_service.get_coffee_image(coffe_uuid)\n\n assert coffee_image_crud.read.call_count == 1\n\n coffee_image_crud.read.assert_called_once_with(\n \"123e4567-e19b-12d3-a456-426655440000\"\n )\n\n assert result == (dummy_coffee_images.image_1_bytes, \"jpg\")\n\n\n@pytest.mark.asyncio\nasync def test_coffee_image_service_get_coffee_image_object_not_found(\n dummy_coffee_images: DummyImages,\n) -> None:\n \"\"\"Test the CoffeeImagesService get_coffee_image method when the object is\n not found.\n\n Args:\n dummy_coffee_images (DummyImages): An instance providing dummy coffee\n image data.\n\n Raises:\n pytest.raises(HTTPException): An HTTPException should be raised when\n the requested coffee image is not found in the S3 bucket.\n\n \"\"\"\n\n coffee_image_crud = MagicMock()\n coffee_image_crud.read.side_effect = ObjectNotFoundError(\n message=\"Object not found\"\n )\n\n coffe_uuid = UUID(\"123e4567-e19b-12d3-a456-426655440000\")\n\n test_coffee_service = ImageService(coffee_images_crud=coffee_image_crud)\n\n with pytest.raises(HTTPException):\n test_coffee_service.get_coffee_image(coffe_uuid)\n", "repo_name": "andifg/coffee_backend", "sub_path": "tests/services/coffee_image/test_coffee_image_read.py", "file_name": "test_coffee_image_read.py", "file_ext": "py", "file_size_in_byte": 2088, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "85", "api": [{"api_name": "tests.conftest.DummyImages", "line_number": 14, "usage_type": "name"}, {"api_name": "unittest.mock.MagicMock", "line_number": 24, "usage_type": "call"}, {"api_name": "uuid.UUID", "line_number": 30, "usage_type": "call"}, {"api_name": "coffee_backend.services.coffee_image.ImageService", "line_number": 32, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 12, "usage_type": "attribute"}, {"api_name": "tests.conftest.DummyImages", "line_number": 47, "usage_type": "name"}, {"api_name": "unittest.mock.MagicMock", "line_number": 62, "usage_type": "call"}, {"api_name": "coffee_backend.exceptions.exceptions.ObjectNotFoundError", "line_number": 63, "usage_type": "call"}, {"api_name": "uuid.UUID", "line_number": 67, "usage_type": "call"}, {"api_name": "coffee_backend.services.coffee_image.ImageService", "line_number": 69, "usage_type": "call"}, {"api_name": "pytest.raises", "line_number": 71, "usage_type": "call"}, {"api_name": "fastapi.HTTPException", "line_number": 71, "usage_type": "argument"}, {"api_name": "pytest.mark", "line_number": 45, "usage_type": "attribute"}]}
+{"seq_id": "9081447525", "text": "import time\nfrom datetime import datetime, timedelta\nimport picamera\n\n## expects interval in seconds\ndef timelapse(interval, duration, rotation=0):\n count = 0\n with picamera.PiCamera() as cam:\n cam.rotation = rotation\n cam.start_preview()\n time.sleep(2)\n for filename in cam.capture_continuous('{timestamp:%Y-%m-%d-%H-%M}-tc{counter:02d}.jpg'):\n count += 1\n if count > (duration / interval):\n return count\n time.sleep(interval)\n return\n\n## expects interval in hours\ndef timelapse2(interval, duration, rotation=0):\n count = 0\n mx = int(duration / interval)\n interval = interval * 3600\n while count <= mx:\n with picamera.PiCamera() as cam:\n cam.rotation = rotation\n cam.start_preview()\n cam.capture('{}-tc{:03d}.jpg'.format(datetime.now(), count))\n count += 1\n time.sleep(interval)\n", "repo_name": "gvelonis/pi-tc", "sub_path": "libpitc.py", "file_name": "libpitc.py", "file_ext": "py", "file_size_in_byte": 935, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "85", "api": [{"api_name": "picamera.PiCamera", "line_number": 8, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 11, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 16, "usage_type": "call"}, {"api_name": "picamera.PiCamera", "line_number": 25, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 28, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 28, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 30, "usage_type": "call"}]}
+{"seq_id": "31624204421", "text": "# To add a new cell, type '# %%'\n# To add a new markdown cell, type '# %% [markdown]'\n# %% [markdown]\n# # Exploratory Data Analysis 0: Initial Data Analysis\n# \n# Initial data analysis is a subset of exploratory data analysis which focuses on making the data fit to be put into a model. This means dealing with non-existant values, normalizing as necescary, and completing other tasks as necescary by the final model to be used.\n# \n# Sources:\n# https://reader.elsevier.com/reader/sd/pii/S0022522315017948?token=E85E57F81B03A15524B9F114673CAF3F3F0FF45188AA953EB7FDD8195887A04325990D11A24383AC4424F669BB95EDAE\n# \n# https://towardsdatascience.com/dealing-with-missing-data-17f8b5827664 \n# \n# https://www.ncbi.nlm.nih.gov/pmc/articles/PMC3668100/\n\n# %%\nimport pandas as pd \nimport seaborn as sns\nimport numpy as np \nimport matplotlib.pyplot as plt\nimport pathlib\n\nfrom IPython.core.interactiveshell import InteractiveShell\nInteractiveShell.ast_node_interactivity = \"all\"\n\n\n# %%\n# Initializing data\nvg_df = pd.read_csv(pathlib.Path('vgsales.csv'))\n\n\n# %%\n\nvg_df.dtypes\nvg_df.describe()\n\nInteractiveShell.ast_node_interactivity = \"last_expr\"\n\n# %% [markdown]\n# ### Check for duplicates\n\n# %%\n# Check for duplicates\nduplicate_bool_ser = vg_df.duplicated(keep = False)\n\nduplicate_bool_ser[duplicate_bool_ser == True]\n\n# %% [markdown]\n# ### Check for Nonexistant Values/Nones (grouped together as nulls) and other unknowns\n\n# %%\n# Plotting the locations of the NaNs by row\n\nnan_locs = vg_df[vg_df.isnull().any(axis = 1)].index.tolist()\n\nplt.bar(nan_locs, 1, width = 10)\nplt.title(\"Null Values by Row\")\nplt.show()\n\n# Binned by thousands\n\nplt.hist(np.array(nan_locs), bins = 17, range = (0.0, 17000.0))\nplt.title('Occurences of NaN in Dataframe binned by 1000s')\nplt.xlabel('Row Number')\nplt.ylabel('Number of Occurences within Each Bin of 1000 rows')\nplt.show()\n\n# Table of where the NaNs are located\n\nNaN_count_col_df = pd.DataFrame()\nfor col in vg_df.columns:\n NaN_count_col_df[f'NaNs_in_{col}'] = [vg_df[col].isnull().values.sum()]\nNaN_count_col_df\n\n\n# %%\nInteractiveShell.ast_node_interactivity = \"all\"\n\n# Found synonyms of null, unknown, nonexistant, n/a, not any etc that might be present in a dataset\n\nnull_synonyms = ['unknown', 'untold', 'undetermined', 'undefined', 'hidden', 'indefinite', 'pending', 'inconclusive', 'unnamed', 'undesignated', 'insignificant', 'nonexistant', 'non-existant', 'missing', 'absent', 'unavailable', 'nonexistent', 'withdrawn', 'null', 'invalid', 'void', 'rescinded', 'repealed', 'blank', 'empty', 'canceled', 'revoked', 'rescinded', 'not any', 'n/a', 'None', 'nan', 'excluded',]\n\n# Select columns with dtype 'object' and converting all strings to lowercase\nobj_cols = ['Name', 'Platform', 'Genre', 'Publisher']\n\nobj_vg_df = pd.DataFrame(dtype = 'object')\nfor col in obj_cols:\n obj_vg_df[col] = vg_df[col].str.lower()\n\n# Recording where the word occus\n\nfor word in null_synonyms:\n for col in obj_cols:\n if True in (obj_vg_df[col] == word).values:\n print('Word: ',word)\n print('in Column: ',col)\n np.array(obj_vg_df.index[obj_vg_df[col] == word].tolist())\n # The output list tells us that the only occurence of a synonym of null was 'unknown' in the 'publisher column'\n\n# Plotting the locations of the 'unknown's by row\n\nunknown_indices = obj_vg_df.index[obj_vg_df['Publisher'] == 'unknown'].tolist()\n\nplt.bar(unknown_indices, 1, width = 10)\nplt.title(\"Unknown Values by 
Row\")\nplt.show()\n\n# Binned by thousands\n\nplt.hist(np.array(unknown_indices), bins = 17, range = (0.0, 17000.0))\nplt.title('Occurences of \"unknown\" in \"publisher\" column binned by 1000s')\nplt.xlabel('Row Number')\nplt.ylabel('Number of Occurences within Each Bin of 1000 rows')\nplt.show()\n\n# %% [markdown]\n# ### How to deal with these NaN and unknown values?\n# \n# Simple options:\n# * Delete the features with NaN and unknown values from the dataset entirely\n# * Delete rows with those features missing\n# * Delete the chunk of rows with those features missing\n# * Turn NaN/unknown into a category\n# * Eg., if options for publishers are 'Nintendo', 'Sega' etc add a new option of 'unknown'\n# * Replace with mean, median or mode\n# \n# From Kang (2013):\n# * \n", "repo_name": "AaDalal/senior_sem_ai", "sub_path": "dataExploration0_IDA_backup.py", "file_name": "dataExploration0_IDA_backup.py", "file_ext": "py", "file_size_in_byte": 4211, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "85", "api": [{"api_name": "IPython.core.interactiveshell.InteractiveShell.ast_node_interactivity", "line_number": 23, "usage_type": "attribute"}, {"api_name": "IPython.core.interactiveshell.InteractiveShell", "line_number": 23, "usage_type": "name"}, {"api_name": "pandas.read_csv", "line_number": 28, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 28, "usage_type": "call"}, {"api_name": "IPython.core.interactiveshell.InteractiveShell.ast_node_interactivity", "line_number": 36, "usage_type": "attribute"}, {"api_name": "IPython.core.interactiveshell.InteractiveShell", "line_number": 36, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.bar", "line_number": 55, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 55, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 56, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 56, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 57, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 57, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.hist", "line_number": 61, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 61, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 61, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.title", "line_number": 62, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 62, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 63, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 63, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 64, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 64, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 65, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 65, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 69, "usage_type": "call"}, {"api_name": "IPython.core.interactiveshell.InteractiveShell.ast_node_interactivity", "line_number": 76, "usage_type": "attribute"}, {"api_name": "IPython.core.interactiveshell.InteractiveShell", "line_number": 76, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 85, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 96, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.bar", "line_number": 103, 
"usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 103, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 104, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 104, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 105, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 105, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.hist", "line_number": 109, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 109, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 109, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.title", "line_number": 110, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 110, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 111, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 111, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 112, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 112, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 113, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 113, "usage_type": "name"}]}
+{"seq_id": "14269491806", "text": "from typing import Optional\nimport re\n\nfrom autotraders.error import SpaceTradersException\nfrom autotraders.paginated_list import PaginatedList\nfrom autotraders.shared_models.waypoint_symbol import WaypointSymbol\nfrom autotraders.space_traders_entity import SpaceTradersEntity\nfrom autotraders.session import AutoTradersSession\n\n\nclass Agent(SpaceTradersEntity):\n contracts: Optional[PaginatedList]\n starting_faction: str\n symbol: str\n account_id: str\n credits: int\n ship_count: int\n ships: Optional[PaginatedList]\n headquarters: WaypointSymbol\n\n def __init__(\n self, session: AutoTradersSession, symbol=None, data: Optional[dict] = None\n ):\n \"\"\"\n :param symbol: If it's None, then the agent associated with the token will be retrieved.\n Otherwise, the specified agent will be retrieved.\n \"\"\"\n if symbol is None:\n super().__init__(session, \"my/agent\", data)\n else:\n super().__init__(session, \"agents/\" + symbol, data)\n\n def update(self, data: Optional[dict] = None):\n data = super()._update(data)\n mappings = {\n \"account_id\": {\"type\": None, \"class\": str, \"alias\": \"accountId\"},\n \"symbol\": {\"type\": None, \"class\": str, \"optional\": False},\n \"headquarters\": {\"type\": None, \"class\": WaypointSymbol},\n \"credits\": {\"type\": None, \"class\": int, \"optional\": False},\n \"starting_faction\": {\n \"type\": None,\n \"class\": str,\n \"alias\": \"startingFaction\",\n },\n \"ship_count\": {\n \"type\": None,\n \"class\": int,\n \"alias\": \"shipCount\",\n \"optional\": True,\n },\n }\n super().update_attr(mappings, data)\n\n @staticmethod\n def create(session, faction, symbol, email, override_email_check=False):\n def check_email(e):\n return re.fullmatch(r\"^[\\w-\\.]+@([\\w-]+\\.)+[\\w-]{2,4}$\", e)\n\n if not override_email_check and not (email is None or check_email(email)):\n raise ValueError(\n email\n + \" is not a valid email. 
Use override_email_check=True to bypass this error.\"\n )\n r = session.post(\n session.b_url + \"register\",\n json={\n \"faction\": faction.upper(),\n \"symbol\": symbol,\n \"email\": email,\n },\n )\n j = r.json()\n if \"error\" in j:\n raise SpaceTradersException(\n j[\"error\"], r.url, r.status_code, r.request.headers, r.headers\n )\n return j[\"data\"][\"token\"]\n\n @staticmethod\n def all(session, page: int = 1) -> PaginatedList:\n def paginated_func(p, num_per_page):\n r = session.get(\n session.b_url + \"agents?limit=\" + str(num_per_page) + \"&page=\" + str(p)\n )\n j = r.json()\n if \"error\" in j:\n raise SpaceTradersException(\n j[\"error\"], r.url, r.status_code, r.request.headers, r.headers\n )\n agents = []\n for agent in j[\"data\"]:\n a = Agent(session, agent[\"symbol\"], agent)\n agents.append(a)\n return agents, r.json()[\"meta\"][\"total\"]\n\n return PaginatedList(paginated_func, page)\n", "repo_name": "cosmictraders/autotraders", "sub_path": "autotraders/agent.py", "file_name": "agent.py", "file_ext": "py", "file_size_in_byte": 3386, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 6, "dataset": "github-code", "pt": "85", "api": [{"api_name": "autotraders.space_traders_entity.SpaceTradersEntity", "line_number": 11, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 12, "usage_type": "name"}, {"api_name": "autotraders.paginated_list.PaginatedList", "line_number": 12, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 18, "usage_type": "name"}, {"api_name": "autotraders.paginated_list.PaginatedList", "line_number": 18, "usage_type": "name"}, {"api_name": "autotraders.shared_models.waypoint_symbol.WaypointSymbol", "line_number": 19, "usage_type": "name"}, {"api_name": "autotraders.session.AutoTradersSession", "line_number": 22, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 22, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 33, "usage_type": "name"}, {"api_name": "autotraders.shared_models.waypoint_symbol.WaypointSymbol", "line_number": 38, "usage_type": "name"}, {"api_name": "re.fullmatch", "line_number": 57, "usage_type": "call"}, {"api_name": "autotraders.error.SpaceTradersException", "line_number": 74, "usage_type": "call"}, {"api_name": "autotraders.error.SpaceTradersException", "line_number": 87, "usage_type": "call"}, {"api_name": "autotraders.paginated_list.PaginatedList", "line_number": 96, "usage_type": "call"}, {"api_name": "autotraders.paginated_list.PaginatedList", "line_number": 80, "usage_type": "name"}]}
+{"seq_id": "41508639112", "text": "from cryptography.fernet import Fernet\n\n\ndef get_encryption_key(service):\n from models import SpotifyProfile, TwitterProfile\n\n if service == 'twitter':\n key = TwitterProfile.get_credentials()[3]\n elif service == 'spotify':\n key = SpotifyProfile.get_credentials()[3]\n\n return key\n\n\ndef encrypt_token(value, service):\n key = get_encryption_key(service)\n\n value = value.encode()\n\n f = Fernet(key)\n value = f.encrypt(value)\n\n return value.decode()\n\n\ndef decrypt_token(value, service):\n key = get_encryption_key(service)\n\n value = value.encode()\n\n f = Fernet(key)\n value = f.decrypt(value)\n\n return value.decode()\n", "repo_name": "jaimescose/stopify", "sub_path": "utils.py", "file_name": "utils.py", "file_ext": "py", "file_size_in_byte": 665, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "85", "api": [{"api_name": "models.TwitterProfile.get_credentials", "line_number": 8, "usage_type": "call"}, {"api_name": "models.TwitterProfile", "line_number": 8, "usage_type": "name"}, {"api_name": "models.SpotifyProfile.get_credentials", "line_number": 10, "usage_type": "call"}, {"api_name": "models.SpotifyProfile", "line_number": 10, "usage_type": "name"}, {"api_name": "cryptography.fernet.Fernet", "line_number": 20, "usage_type": "call"}, {"api_name": "cryptography.fernet.Fernet", "line_number": 31, "usage_type": "call"}]}
+{"seq_id": "39171739265", "text": "# steth_controller.py\n# Coordinates all the digital stethescope components\n\n### Imports ###\n\n# Built-ins\nimport datetime\nimport logging\nimport os\nimport sys\nimport threading\nfrom time import sleep\n\n# Local imports\nfrom data_classifier import DataClassifier\nfrom data_collection import BluetoothController\nfrom data_preproc import DataPreproc\nfrom interface_api import Interface_API\nfrom peak_detector import PeakDetector\nfrom analysis_controller import AnalysisController\n\n### Globals ###\n\nLOGGING_DIR = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), 'logs')\nDATA_DIR = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), 'data')\n\n### File Checks ###\n\nif not os.path.isdir(LOGGING_DIR):\n os.mkdir(LOGGING_DIR)\n\nif not os.path.isdir(DATA_DIR):\n os.mkdir(DATA_DIR)\n\n### Logging Configuration ###\n\nlogging.basicConfig(\n level=logging.INFO,\n format=\"[%(levelname)s] %(asctime)s - %(name)s - %(message)s\",\n datefmt='%d/%m/%Y %H:%M:%S',\n handlers=[\n logging.FileHandler(os.path.join(LOGGING_DIR, 'python.log')),\n logging.StreamHandler(sys.stdout)\n ])\n\nLOGGER = logging.getLogger(\"controller\")\n\n### Classes ###\n\nclass StethescopeController():\n def __init__(self):\n # Child modules for handling various components\n LOGGER.info(\"Creating modules...\")\n self.data_classifier_module = DataClassifier(self)\n self.data_collection_module = BluetoothController(self) \n self.data_preproc = DataPreproc(self)\n self.interface = Interface_API(self)\n self.peak_detector_module = PeakDetector(self)\n self.analysis_peak_detector = PeakDetector(self)\n self.analysis_controller = AnalysisController(self)\n \n # General class variables\n self.child_threads = []\n self.data_dir = DATA_DIR\n self.ecg_file_name = None\n self.mic_file_name = None\n\n self.ecg_save_file_name = None\n self.mic_save_file_name = None\n self.target_save_data_dir = None\n\n # Control signals\n self.start_analysis = False\n self.enable_bt_search = False\n self.collect_bt_data = False\n \n # Data structures for shared information\n self.raw_data_stream = None\n self.ecg_data = None\n self.mic_data = None\n\n def start_listening(self):\n LOGGER.info(\"Spawning child threads...\")\n interface_api_thread = threading.Thread(\n target=self.interface.connect_to_interface, \n daemon=True)\n interface_api_thread.start()\n\n anal_controller_thread = threading.Thread(\n target=self.analysis_controller.start_controller, \n daemon=True) \n anal_controller_thread.start()\n \n while True:\n while not self.enable_bt_search:\n sleep(2)\n result = self.data_collection_module.search_for_device()\n self.enable_bt_search = False\n\n if result:\n LOGGER.info(\"Connected to BT\")\n self.interface.send_bt_status(result, DATA_DIR)\n else:\n LOGGER.info(\"Failed to connect to BT\")\n self.interface.send_bt_status(result, DATA_DIR)\n continue\n\n while not self.collect_bt_data:\n if self.enable_bt_search: break\n sleep(2)\n\n if self.collect_bt_data:\n LOGGER.info(\"Collecting BT data now...\")\n self.data_collection_module.connect_and_listen()\n \n LOGGER.info(\"Data pipe closed\")\n\n### Main ###\n\nif __name__ == \"__main__\":\n # Code for testing the entire system\n stethescope = StethescopeController()\n \n try:\n stethescope.start_listening() \n except KeyboardInterrupt:\n LOGGER.info(\"Listening stopped by user.\")", "repo_name": "AZRehkopf/stethescope_lads", "sub_path": "python/steth_controller.py", "file_name": "steth_controller.py", 
"file_ext": "py", "file_size_in_byte": 3836, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "85", "api": [{"api_name": "os.path.join", "line_number": 24, "usage_type": "call"}, {"api_name": "os.path", "line_number": 24, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 24, "usage_type": "call"}, {"api_name": "os.path.abspath", "line_number": 24, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 25, "usage_type": "call"}, {"api_name": "os.path", "line_number": 25, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 25, "usage_type": "call"}, {"api_name": "os.path.abspath", "line_number": 25, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 29, "usage_type": "call"}, {"api_name": "os.path", "line_number": 29, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 30, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 32, "usage_type": "call"}, {"api_name": "os.path", "line_number": 32, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 33, "usage_type": "call"}, {"api_name": "logging.basicConfig", "line_number": 37, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 38, "usage_type": "attribute"}, {"api_name": "logging.FileHandler", "line_number": 42, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 42, "usage_type": "call"}, {"api_name": "os.path", "line_number": 42, "usage_type": "attribute"}, {"api_name": "logging.StreamHandler", "line_number": 43, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 43, "usage_type": "attribute"}, {"api_name": "logging.getLogger", "line_number": 46, "usage_type": "call"}, {"api_name": "data_classifier.DataClassifier", "line_number": 54, "usage_type": "call"}, {"api_name": "data_collection.BluetoothController", "line_number": 55, "usage_type": "call"}, {"api_name": "data_preproc.DataPreproc", "line_number": 56, "usage_type": "call"}, {"api_name": "interface_api.Interface_API", "line_number": 57, "usage_type": "call"}, {"api_name": "peak_detector.PeakDetector", "line_number": 58, "usage_type": "call"}, {"api_name": "peak_detector.PeakDetector", "line_number": 59, "usage_type": "call"}, {"api_name": "analysis_controller.AnalysisController", "line_number": 60, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 84, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 89, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 96, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 110, "usage_type": "call"}]}
+{"seq_id": "24720606894", "text": "import sys\nimport time\nimport torch\nimport copy\n\ndef train(model, dataLoaders, criterion, optimizer, device, nEpochs=25):\n since = time.time()\n trainHistory, valHistory = [], []\n bestWeights = copy.deepcopy(model.state_dict())\n bestAccuracy = 0.0\n bestEpoch = 0\n\n print('Training')\n\n for epoch in range(nEpochs):\n print('Epoch {}/{}'.format(epoch, nEpochs - 1))\n print('-' * 10)\n\n for phase in ['training', 'validation']:\n if phase == 'training':\n model.train()\n else:\n model.eval()\n\n runningLoss, runningCorrects = 0.0, 0\n\n batch = 0\n print('{} batch'.format(phase), end=' ')\n for inputs, labels in dataLoaders[phase]:\n if batch % 10 == 0:\n print('{}/{}'.format(batch, len(dataLoaders[phase])), end=' ')\n sys.stdout.flush()\n batch += 1\n if batch % 200 == 0:\n print()\n\n inputs = inputs.to(device)\n labels = labels.to(device)\n\n optimizer.zero_grad()\n\n # Forward pass\n with torch.set_grad_enabled(phase == 'training'):\n outputs = model(inputs)\n loss = criterion(outputs, labels)\n\n _, predictions = torch.max(outputs, 1)\n\n # Backward pass if needed\n if phase == 'training':\n loss.backward()\n optimizer.step()\n\n runningLoss += loss.item() * inputs.size(0)\n runningCorrects += torch.sum(predictions == labels.data)\n\n nSamples = len(dataLoaders[phase].sampler.indices)\n epochLoss = runningLoss / nSamples\n epochAccuracy = float(runningCorrects) / nSamples\n\n print('\\n{} loss: {:.4f}; accuracy: {:.4f}'.format(phase, epochLoss, epochAccuracy), end='')\n\n if phase == 'training':\n trainHistory.append(epochAccuracy)\n print()\n elif phase == 'validation':\n if epochAccuracy > bestAccuracy:\n bestAccuracy = epochAccuracy\n bestEpoch = epoch\n bestWeights = copy.deepcopy(model.state_dict())\n valHistory.append(epochAccuracy)\n print(' (best validation accuracy so far is {:.4f} after epoch {})'.format(bestAccuracy, bestEpoch))\n\n print()\n\n timeElapsed = time.time() - since\n print('Training time {:.0f}m {:.0f}s'.format(timeElapsed // 60, timeElapsed % 60))\n print('Best validation accuracy: {:4f}'.format(bestAccuracy))\n\n model.load_state_dict(bestWeights)\n return model, trainHistory, valHistory\n\n\ndef test(model, testLoader, device):\n print('Testing')\n model.eval()\n corrects = 0\n\n allLabels, allPredictions = [], []\n batch = 0\n for inputs, labels in testLoader:\n if batch % 10 == 0:\n print('{}/{}'.format(batch, len(testLoader)), end=' ')\n sys.stdout.flush()\n batch += 1\n if batch % 200 == 0:\n print()\n\n inputs = inputs.to(device)\n labels = labels.to(device)\n allLabels.extend(labels.tolist())\n\n with torch.set_grad_enabled(False):\n outputs = model(inputs)\n _, predictions = torch.max(outputs, 1)\n allPredictions.extend(predictions.tolist())\n\n corrects += torch.sum(predictions == labels.data)\n\n nSamples = len(testLoader.sampler.indices)\n accuracy = float(corrects) / nSamples\n\n print('\\nTest accuracy on {} samples: {:.4f}'.format(nSamples, accuracy))\n\n return accuracy, allLabels, allPredictions\n", "repo_name": "Oliver-ss/Applying-machine-learning-to-investigate-long-term-insect-plant-interactions", "sub_path": "Classification/train.py", "file_name": "train.py", "file_ext": "py", "file_size_in_byte": 3746, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "73", "api": [{"api_name": "time.time", "line_number": 7, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 9, "usage_type": "call"}, {"api_name": "sys.stdout.flush", 
"line_number": 32, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 32, "usage_type": "attribute"}, {"api_name": "torch.set_grad_enabled", "line_number": 43, "usage_type": "call"}, {"api_name": "torch.max", "line_number": 47, "usage_type": "call"}, {"api_name": "torch.sum", "line_number": 55, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 70, "usage_type": "call"}, {"api_name": "time.time", "line_number": 76, "usage_type": "call"}, {"api_name": "sys.stdout.flush", "line_number": 94, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 94, "usage_type": "attribute"}, {"api_name": "torch.set_grad_enabled", "line_number": 103, "usage_type": "call"}, {"api_name": "torch.max", "line_number": 105, "usage_type": "call"}, {"api_name": "torch.sum", "line_number": 108, "usage_type": "call"}]}
+{"seq_id": "13132433899", "text": "import collections\n\n# Definition for singly-linked list.\nclass ListNode:\n def __init__(self, x):\n self.val = x\n self.next = None\n\n\nclass List:\n def __init__(self, values):\n self.head = ListNode(values[0])\n cur = self.head\n for v in values[1:]:\n cur.next = ListNode(v)\n cur = cur.next\n\n\n# Definition for a binary tree node.\nclass TreeNode:\n def __init__(self, key):\n self.key = key\n self.left = None\n self.right = None\n\n def __repr__(self):\n return str(self.key)\n\n\nclass Tree:\n def __init__(self, nums):\n start, end = 0, 1\n level_nums = []\n while end <= len(nums):\n curs = nums[start:end]\n level_nums.append(curs)\n curs_not_null = [n for n in curs if n]\n start += len(curs)\n end += len(curs_not_null)*2\n\n i = 1\n parents = [TreeNode(n) for n in level_nums[0]]\n while i < len(level_nums) and parents:\n children = [TreeNode(n) if n else None for n in level_nums[i]]\n j = 0\n tmp_parents = []\n for parent in parents:\n if i == 1:\n self._root = parent\n parent.left = children[j]\n parent.right = children[j+1]\n if parent.left:\n tmp_parents.append(parent.left)\n if parent.right:\n tmp_parents.append(parent.right)\n j += 2\n parents = tmp_parents\n i += 1\n\n @property\n def root(self):\n return self._root\n\n\nclass TreeDeserialize:\n def __init__(self, data):\n if data == \"[]\":\n return\n vals, i = data[1:-1].split(','), 1\n self._root = TreeNode(int(vals[0]))\n queue = collections.deque()\n queue.append(self._root)\n while queue:\n node = queue.popleft()\n if vals[i] != \"null\":\n node.left = TreeNode(int(vals[i]))\n queue.append(node.left)\n i += 1\n if vals[i] != \"null\":\n node.right = TreeNode(int(vals[i]))\n queue.append(node.right)\n i += 1\n\n @property\n def root(self):\n return self._root\n", "repo_name": "ForeverDreamer/data-structures-and-algorithms_peking-university", "sub_path": "笔试算法题/力扣/剑指 Offer(第 2 版)/utils.py", "file_name": "utils.py", "file_ext": "py", "file_size_in_byte": 2275, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "73", "api": [{"api_name": "collections.deque", "line_number": 71, "usage_type": "call"}]}
+{"seq_id": "37858925026", "text": "'''\nCreated on Feb 22, 2019\n\n@author: daniel\n'''\nfrom morphological_skeleton_transform import reconstruct_image, compute_skeleton_subsets\nimport sys\nimport cv2\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport cv2\n\n\ndef main():\n B = cv2.getStructuringElement(cv2.MORPH_CROSS, (5, 5))\n plt.imshow(B)\n plt.gray()\n plt.title(\"Structuring Element\")\n plt.show()\n if len(sys.argv[1:]) == 0:\n sys.argv[1:] = [\"Data/deer.png\", \"Data/dog.png\", \"Data/butterfly.png\", \"Data/lamp.png\", \"Data/fish.png\"]\n\n imgs = []\n for arg in sys.argv[1:]:\n imgs.append(cv2.imread(arg))\n for X in imgs:\n\n # preprocessing step...\n X = np.bitwise_not(X)\n X = cv2.cvtColor(X, cv2.COLOR_BGR2GRAY)\n _, X = cv2.threshold(X, 0, 255, cv2.THRESH_OTSU)\n\n S = compute_skeleton_subsets(X, B)\n\n fig = plt.figure()\n plt.gray()\n fig.add_subplot(1, len(S) + 1, 1)\n plt.imshow(X)\n plt.title(\"Original Image\")\n plt.axis('off')\n for i, s in enumerate(S):\n fig.add_subplot(1, len(S) + 1, i + 2)\n plt.imshow(s)\n plt.title(\"$S_{\" + str(i) + \"}$\")\n plt.axis('off')\n plt.show()\n\n (components, reconstructions) = reconstruct_image(S, X, B)\n\n fig = plt.figure()\n plt.gray()\n fig.add_subplot(1, len(components) + 1, 1)\n plt.imshow(X)\n plt.title(\"Original Image\")\n plt.axis('off')\n\n for n, component in enumerate(components):\n fig.add_subplot(1, len(components) + 1, n + 2)\n title = \"$S_{\" + str(n) + \"}\"\n\n if n > 1:\n title = title + \"\\oplus \" + str(n) + \"B\"\n else:\n title = title + \"\\oplus B\"\n title = title + \"$\"\n plt.imshow(component)\n plt.title(title)\n plt.axis('off')\n plt.show()\n\n fig = plt.figure()\n fig.add_subplot(121)\n plt.imshow(X)\n plt.title('Original Image')\n plt.axis('off')\n fig.add_subplot(122)\n plt.imshow(reconstructions[-1])\n plt.title('Reconstruction')\n plt.axis('off')\n plt.show()\n\n\nif __name__ == \"__main__\":\n main()\n exit()\n", "repo_name": "danielenricocahall/Morphological-Skeleton-Transform", "sub_path": "driver.py", "file_name": "driver.py", "file_ext": "py", "file_size_in_byte": 2232, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "85", "api": [{"api_name": "cv2.getStructuringElement", "line_number": 15, "usage_type": "call"}, {"api_name": "cv2.MORPH_CROSS", "line_number": 15, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 16, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 16, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.gray", "line_number": 17, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 17, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 18, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 18, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 19, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 19, "usage_type": "name"}, {"api_name": "sys.argv", "line_number": 20, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 21, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 24, "usage_type": "attribute"}, {"api_name": "cv2.imread", "line_number": 25, "usage_type": "call"}, {"api_name": "numpy.bitwise_not", "line_number": 29, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 30, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 30, "usage_type": "attribute"}, 
{"api_name": "cv2.threshold", "line_number": 31, "usage_type": "call"}, {"api_name": "cv2.THRESH_OTSU", "line_number": 31, "usage_type": "attribute"}, {"api_name": "morphological_skeleton_transform.compute_skeleton_subsets", "line_number": 33, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 35, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 35, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.gray", "line_number": 36, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 36, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 38, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 38, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 39, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 39, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axis", "line_number": 40, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 40, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 43, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 43, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 44, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 44, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axis", "line_number": 45, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 45, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 46, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 46, "usage_type": "name"}, {"api_name": "morphological_skeleton_transform.reconstruct_image", "line_number": 48, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 50, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 50, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.gray", "line_number": 51, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 51, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 53, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 53, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 54, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 54, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axis", "line_number": 55, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 55, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 66, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 66, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 67, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 67, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axis", "line_number": 68, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 68, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 69, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 69, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 71, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 71, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 73, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 73, "usage_type": "name"}, {"api_name": 
"matplotlib.pyplot.title", "line_number": 74, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 74, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axis", "line_number": 75, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 75, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 77, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 77, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 78, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 78, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axis", "line_number": 79, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 79, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 80, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 80, "usage_type": "name"}]}
+{"seq_id": "19912584648", "text": "import torch\nfrom torch.utils.data import DataLoader\nfrom torchvision import transforms\nfrom PIL import Image\n\nfrom dataset.caption_dataset import re_train_dataset, re_eval_dataset, pretrain_dataset_4m, coco_dataset, nocaps_dataset\nfrom dataset.nlvr_dataset import nlvr_dataset\nfrom dataset.ve_dataset import ve_dataset\nfrom dataset.vqa_dataset import vqa_dataset\nfrom dataset.grounding_dataset import build_uni_training_dataset,build_vg_dataset\nfrom dataset.videoqa_dataset import videoqa_dataset\nfrom dataset.video_dataset import vatex_video_caps_dataset\n\nfrom dataset.randaugment import RandomAugment\n\ndef create_dataset(dataset, config, epoch=None):\n \n normalize = transforms.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711))\n \n pretrain_transform = transforms.Compose([ \n transforms.RandomResizedCrop(config['image_res'],scale=(0.2, 1.0), interpolation=Image.BICUBIC),\n transforms.RandomHorizontalFlip(),\n RandomAugment(2,7,isPIL=True,augs=['Identity','AutoContrast','Equalize','Brightness','Sharpness',\n 'ShearX', 'ShearY', 'TranslateX', 'TranslateY', 'Rotate']), \n transforms.ToTensor(),\n normalize,\n ]) \n train_transform = transforms.Compose([ \n transforms.RandomResizedCrop(config['image_res'],scale=(0.5, 1.0), interpolation=Image.BICUBIC),\n transforms.RandomHorizontalFlip(),\n RandomAugment(2,7,isPIL=True,augs=['Identity','AutoContrast','Equalize','Brightness','Sharpness',\n 'ShearX', 'ShearY', 'TranslateX', 'TranslateY', 'Rotate']), \n transforms.ToTensor(),\n normalize,\n ]) \n test_transform = transforms.Compose([\n transforms.Resize((config['image_res'],config['image_res']),interpolation=Image.BICUBIC),\n transforms.ToTensor(),\n normalize,\n ]) \n \n if dataset=='pretrain':\n dataset = pretrain_dataset_4m(config['train_file'], pretrain_transform, read_local_data=config['read_local_data'], image_root=config['image_root'], epoch=epoch)\n return dataset\n\n elif dataset=='re':\n train_dataset = re_train_dataset(config['train_file'], train_transform, config['image_root'])\n val_dataset = re_eval_dataset(config['val_file'], test_transform, config['image_root'])\n test_dataset = re_eval_dataset(config['test_file'], test_transform, config['image_root'])\n return train_dataset, val_dataset, test_dataset\n \n elif dataset=='vqa': \n train_dataset = vqa_dataset(config['train_file'], train_transform, config['vqa_root'], config['vg_root'], config['gqa_root'], split='train', read_local_data=config['read_local_data'], add_ocr=config['add_ocr'], add_object=config['add_object']) \n vqa_test_dataset = vqa_dataset(config['test_file'], test_transform, config['vqa_root'], config['vg_root'], config['gqa_root'], split='test', answer_list=config['answer_list'], read_local_data=config['read_local_data'], add_ocr=config['add_ocr'], add_object=config['add_object']) \n vqa_val_dataset = vqa_dataset(config['val_file'], test_transform, config['vqa_root'], config['vg_root'], config['gqa_root'],split='test', answer_list=config['answer_list'], read_local_data=config['read_local_data'], add_ocr=config['add_ocr'], add_object=config['add_object']) \n return train_dataset, vqa_val_dataset, vqa_test_dataset\n elif dataset== 'nocaps':\n val_dataset = nocaps_dataset(config['val_file'], test_transform, config['nocaps_root'], max_words=config['max_length'], read_local_data=config['read_local_data'], is_train=False, add_object=config['add_object'])\n test_dataset = nocaps_dataset(config['test_file'], test_transform, 
config['nocaps_root'], max_words=config['max_length'], read_local_data=config['read_local_data'], is_train=False, add_object=config['add_object'])\n return val_dataset, test_dataset\n elif dataset== 'coco':\n train_dataset = coco_dataset(config['train_file'], train_transform, config['coco_root'], max_words=config['max_length'], read_local_data=config['read_local_data'], is_train=True, add_object=config['add_object'])\n val_dataset = coco_dataset(config['val_file'], test_transform, config['coco_root'], max_words=config['max_length'], read_local_data=config['read_local_data'], is_train=False, add_object=config['add_object'])\n test_dataset = coco_dataset(config['test_file'], test_transform, config['coco_root'], max_words=config['max_length'], read_local_data=config['read_local_data'], is_train=False, add_object=config['add_object'])\n return train_dataset, val_dataset, test_dataset\n elif dataset=='nlvr': \n train_dataset = nlvr_dataset(config['train_file'], train_transform, config['image_root']) \n val_dataset = nlvr_dataset(config['val_file'], test_transform, config['image_root']) \n test_dataset = nlvr_dataset(config['test_file'], test_transform, config['image_root']) \n return train_dataset, val_dataset, test_dataset \n \n elif dataset=='ve': \n train_dataset = ve_dataset(config['train_file'], train_transform, config['image_root']) \n val_dataset = ve_dataset(config['val_file'], test_transform, config['image_root']) \n test_dataset = ve_dataset(config['test_file'], test_transform, config['image_root']) \n return train_dataset, val_dataset, test_dataset \n\n elif 'vg_' in dataset:\n if 'uni' in dataset:\n train_dataset = build_uni_training_dataset(args=config)\n val_dataset = build_vg_dataset(split='val',args=config,dataset_name='unc')\n eval_dataset = 'unc'\n else:\n train_dataset = build_vg_dataset(split='train',args=config,dataset_name=dataset[3:])\n val_dataset = build_vg_dataset(split='val',args=config,dataset_name=dataset[3:])\n eval_dataset = dataset[3:]\n eval_split = {\n 'unc':['testA','testB'],\n 'unc+':['testA','testB'],\n 'gref_umd':['test']\n }\n test_datasets = {split:build_vg_dataset(split=split,args=config,dataset_name=eval_dataset) for split in eval_split[eval_dataset]}\n return train_dataset, val_dataset,test_datasets\n \n\n\n elif dataset=='video_qa': \n train_dataset = videoqa_dataset(config['train_file'], train_transform, config['videoqa_root'], split='train', read_local_data=config['read_local_data'], max_img_size=config['image_res']) \n vqa_test_dataset = videoqa_dataset(config['test_file'], test_transform, config['videoqa_root'], split='test', answer_list=config['answer_list'], read_local_data=config['read_local_data'], max_img_size=config['image_res']) \n vqa_val_dataset = videoqa_dataset(config['val_file'], test_transform, config['videoqa_root'], split='test', answer_list=config['answer_list'], read_local_data=config['read_local_data'], max_img_size=config['image_res']) \n return train_dataset, vqa_val_dataset, vqa_test_dataset\n\n elif dataset== 'vatex_video_caps':\n test_dataset = vatex_video_caps_dataset(config['test_file'], config['vatex_video_caps_root'], max_words=config['max_length'], read_local_data=config['read_local_data'], is_train=False, num_frm=config['num_frm_test'], max_img_size=config['image_res'], frm_sampling_strategy='uniform')\n return test_dataset\n\ndef videoqa_collate_fn(batch):\n image_list, question_list, answer_list, n = [], [], [], []\n for image, question, answer in batch:\n image_list.append(image)\n question_list.append(question)\n 
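# each sample carries exactly one answer, so every entry of n is 1\n        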
answer_list.append(answer)\n n.append(1)\n return torch.stack(image_list,dim=0), question_list, answer_list, n\n\ndef vqa_collate_fn(batch):\n image_list, question_list, answer_list, weight_list, n = [], [], [], [], []\n for image, question, answer, weights in batch:\n image_list.append(image)\n question_list.append(question)\n weight_list += weights \n answer_list += answer\n n.append(len(answer))\n return torch.stack(image_list,dim=0), question_list, answer_list, torch.Tensor(weight_list), n\n\ndef nocaps_collate_fn(batch):\n image_list, image_id_list = [], []\n for image, image_id in batch:\n image_list.append(image)\n image_id_list.append(image_id)\n return torch.stack(image_list,dim=0), image_id_list\ndef coco_collate_fn(batch):\n image_list, caption_list, object_labels, image_id_list, gold_caption_list = [], [], [], [], []\n for image, caption, object_label, image_id, gold_caption in batch:\n image_list.append(image)\n caption_list.append(caption)\n image_id_list.append(image_id)\n gold_caption_list.append(gold_caption)\n object_labels.append(object_label)\n return torch.stack(image_list,dim=0), caption_list, object_labels, image_id_list, gold_caption_list\n\n\ndef create_sampler(datasets, shuffles, num_tasks, global_rank):\n samplers = []\n for dataset,shuffle in zip(datasets,shuffles):\n sampler = torch.utils.data.DistributedSampler(dataset, num_replicas=num_tasks, rank=global_rank, shuffle=shuffle)\n samplers.append(sampler)\n return samplers \n\n\ndef create_loader(datasets, samplers, batch_size, num_workers, is_trains, collate_fns):\n loaders = []\n for dataset,sampler,bs,n_worker,is_train,collate_fn in zip(datasets,samplers,batch_size,num_workers,is_trains,collate_fns):\n if is_train:\n shuffle = (sampler is None)\n drop_last = True\n else:\n shuffle = False\n drop_last = False\n loader = DataLoader(\n dataset,\n batch_size=bs,\n num_workers=n_worker,\n pin_memory=True,\n sampler=sampler,\n shuffle=shuffle,\n collate_fn=collate_fn,\n drop_last=drop_last,\n ) \n loaders.append(loader)\n return loaders \n", "repo_name": "alibaba/AliceMind", "sub_path": "mPLUG/dataset/__init__.py", "file_name": "__init__.py", "file_ext": "py", "file_size_in_byte": 10120, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1868, "dataset": "github-code", "pt": "85", "api": [{"api_name": "torchvision.transforms.Normalize", "line_number": 18, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 18, "usage_type": "name"}, {"api_name": "torchvision.transforms.Compose", "line_number": 20, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 20, "usage_type": "name"}, {"api_name": "torchvision.transforms.RandomResizedCrop", "line_number": 21, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 21, "usage_type": "name"}, {"api_name": "PIL.Image.BICUBIC", "line_number": 21, "usage_type": "attribute"}, {"api_name": "PIL.Image", "line_number": 21, "usage_type": "name"}, {"api_name": "torchvision.transforms.RandomHorizontalFlip", "line_number": 22, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 22, "usage_type": "name"}, {"api_name": "dataset.randaugment.RandomAugment", "line_number": 23, "usage_type": "call"}, {"api_name": "torchvision.transforms.ToTensor", "line_number": 25, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 25, "usage_type": "name"}, {"api_name": "torchvision.transforms.Compose", "line_number": 28, "usage_type": "call"}, {"api_name": 
"torchvision.transforms", "line_number": 28, "usage_type": "name"}, {"api_name": "torchvision.transforms.RandomResizedCrop", "line_number": 29, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 29, "usage_type": "name"}, {"api_name": "PIL.Image.BICUBIC", "line_number": 29, "usage_type": "attribute"}, {"api_name": "PIL.Image", "line_number": 29, "usage_type": "name"}, {"api_name": "torchvision.transforms.RandomHorizontalFlip", "line_number": 30, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 30, "usage_type": "name"}, {"api_name": "dataset.randaugment.RandomAugment", "line_number": 31, "usage_type": "call"}, {"api_name": "torchvision.transforms.ToTensor", "line_number": 33, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 33, "usage_type": "name"}, {"api_name": "torchvision.transforms.Compose", "line_number": 36, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 36, "usage_type": "name"}, {"api_name": "torchvision.transforms.Resize", "line_number": 37, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 37, "usage_type": "name"}, {"api_name": "PIL.Image.BICUBIC", "line_number": 37, "usage_type": "attribute"}, {"api_name": "PIL.Image", "line_number": 37, "usage_type": "name"}, {"api_name": "torchvision.transforms.ToTensor", "line_number": 38, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 38, "usage_type": "name"}, {"api_name": "dataset.caption_dataset", "line_number": 42, "usage_type": "name"}, {"api_name": "dataset.caption_dataset", "line_number": 43, "usage_type": "name"}, {"api_name": "dataset.caption_dataset.pretrain_dataset_4m", "line_number": 43, "usage_type": "call"}, {"api_name": "dataset.caption_dataset", "line_number": 44, "usage_type": "name"}, {"api_name": "dataset.caption_dataset", "line_number": 46, "usage_type": "name"}, {"api_name": "dataset.caption_dataset.re_train_dataset", "line_number": 47, "usage_type": "call"}, {"api_name": "dataset.caption_dataset.re_eval_dataset", "line_number": 48, "usage_type": "call"}, {"api_name": "dataset.caption_dataset.re_eval_dataset", "line_number": 49, "usage_type": "call"}, {"api_name": "dataset.caption_dataset", "line_number": 52, "usage_type": "name"}, {"api_name": "dataset.vqa_dataset.vqa_dataset", "line_number": 53, "usage_type": "call"}, {"api_name": "dataset.vqa_dataset.vqa_dataset", "line_number": 54, "usage_type": "call"}, {"api_name": "dataset.vqa_dataset.vqa_dataset", "line_number": 55, "usage_type": "call"}, {"api_name": "dataset.caption_dataset", "line_number": 57, "usage_type": "name"}, {"api_name": "dataset.caption_dataset.nocaps_dataset", "line_number": 58, "usage_type": "call"}, {"api_name": "dataset.caption_dataset.nocaps_dataset", "line_number": 59, "usage_type": "call"}, {"api_name": "dataset.caption_dataset", "line_number": 61, "usage_type": "name"}, {"api_name": "dataset.caption_dataset.coco_dataset", "line_number": 62, "usage_type": "call"}, {"api_name": "dataset.caption_dataset.coco_dataset", "line_number": 63, "usage_type": "call"}, {"api_name": "dataset.caption_dataset.coco_dataset", "line_number": 64, "usage_type": "call"}, {"api_name": "dataset.caption_dataset", "line_number": 66, "usage_type": "name"}, {"api_name": "dataset.nlvr_dataset.nlvr_dataset", "line_number": 67, "usage_type": "call"}, {"api_name": "dataset.nlvr_dataset.nlvr_dataset", "line_number": 68, "usage_type": "call"}, {"api_name": "dataset.nlvr_dataset.nlvr_dataset", "line_number": 
69, "usage_type": "call"}, {"api_name": "dataset.caption_dataset", "line_number": 72, "usage_type": "name"}, {"api_name": "dataset.ve_dataset.ve_dataset", "line_number": 73, "usage_type": "call"}, {"api_name": "dataset.ve_dataset.ve_dataset", "line_number": 74, "usage_type": "call"}, {"api_name": "dataset.ve_dataset.ve_dataset", "line_number": 75, "usage_type": "call"}, {"api_name": "dataset.caption_dataset", "line_number": 78, "usage_type": "name"}, {"api_name": "dataset.caption_dataset", "line_number": 79, "usage_type": "name"}, {"api_name": "dataset.grounding_dataset.build_uni_training_dataset", "line_number": 80, "usage_type": "call"}, {"api_name": "dataset.grounding_dataset.build_vg_dataset", "line_number": 81, "usage_type": "call"}, {"api_name": "dataset.grounding_dataset.build_vg_dataset", "line_number": 84, "usage_type": "call"}, {"api_name": "dataset.caption_dataset", "line_number": 84, "usage_type": "name"}, {"api_name": "dataset.grounding_dataset.build_vg_dataset", "line_number": 85, "usage_type": "call"}, {"api_name": "dataset.caption_dataset", "line_number": 85, "usage_type": "name"}, {"api_name": "dataset.caption_dataset", "line_number": 86, "usage_type": "name"}, {"api_name": "dataset.grounding_dataset.build_vg_dataset", "line_number": 92, "usage_type": "call"}, {"api_name": "dataset.caption_dataset", "line_number": 97, "usage_type": "name"}, {"api_name": "dataset.videoqa_dataset.videoqa_dataset", "line_number": 98, "usage_type": "call"}, {"api_name": "dataset.videoqa_dataset.videoqa_dataset", "line_number": 99, "usage_type": "call"}, {"api_name": "dataset.videoqa_dataset.videoqa_dataset", "line_number": 100, "usage_type": "call"}, {"api_name": "dataset.caption_dataset", "line_number": 103, "usage_type": "name"}, {"api_name": "dataset.video_dataset.vatex_video_caps_dataset", "line_number": 104, "usage_type": "call"}, {"api_name": "torch.stack", "line_number": 114, "usage_type": "call"}, {"api_name": "torch.stack", "line_number": 124, "usage_type": "call"}, {"api_name": "torch.Tensor", "line_number": 124, "usage_type": "call"}, {"api_name": "torch.stack", "line_number": 131, "usage_type": "call"}, {"api_name": "torch.stack", "line_number": 140, "usage_type": "call"}, {"api_name": "dataset.caption_dataset", "line_number": 145, "usage_type": "name"}, {"api_name": "torch.utils.data.DistributedSampler", "line_number": 146, "usage_type": "call"}, {"api_name": "dataset.caption_dataset", "line_number": 146, "usage_type": "argument"}, {"api_name": "torch.utils", "line_number": 146, "usage_type": "attribute"}, {"api_name": "dataset.caption_dataset", "line_number": 153, "usage_type": "name"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 160, "usage_type": "call"}, {"api_name": "dataset.caption_dataset", "line_number": 161, "usage_type": "argument"}]}
+{"seq_id": "21387611178", "text": "import collections\nfrom .texttable import Texttable\nfrom . import termui\nimport click\nfrom rich import print as rprint\nfrom rich.table import Table\n\n\n# -----------------------------------------------------------------------------\ndef dumpSubRegs(node):\n regs = {}\n for i in sorted(node.getNodes()):\n regs[i] = node.getNode(i).read()\n node.getClient().dispatch()\n\n return {k: v.value() for k, v in regs.items()}\n\n\n# -----------------------------------------------------------------------------\ndef dumpReg(node):\n v = node.read()\n node.getClient().dispatch()\n return {node.getId(): v.value()}\n\n\n# # -----------------------------------------------------------------------------\n# def readTpgStatus(node):\n\n# row_names = [\n# 'upck >> hsc',\n# 'hsc >> psub',\n# 'psub >> fir ',\n# 'fir >> hf',\n# 'hf >> hsc',\n# 'hsc >> cr_if',\n# 'tpg >> mask',\n# 'mask >> filt'\n# ]\n\n# flag_map = collections.OrderedDict([\n# ('v', 'valid'),\n# ('u', 'user'),\n# ('l', 'last')\n# ])\n\n# probes = {}\n# for i in range(4):\n# node.getNode('sel.chain').write(i)\n# node.getClient().dispatch()\n# probes[i] = dumpSubRegs(node.getNode('csr.mon'))\n\n# hdr = ['probe']+['{}'.format(k) for k in range(4)]\n# tpg_table = Texttable(max_width=0)\n# tpg_table.header(hdr)\n# tpg_table.set_deco(Texttable.HEADER | Texttable.BORDER | Texttable.VLINES)\n# tpg_table.set_chars(['-', '|', '+', '-'])\n# for k in range(8):\n# lbl = 'p'+str(k)\n\n# flags = ''.join([f for f, l in flag_map.items() if probes[i][lbl+'.'+l]])\n\n# row = [lbl+': '+row_names[k]]+['{} [{}] ({}) {}'.format(probes[i][lbl+'.pkt_ctr'], 'rdy' if probes[i][lbl+'.ready'] else 'bsy', flags, probes[i][lbl+'.last_err']) for i in range(4)]\n# tpg_table.add_row(row)\n# tbl = tpg_table.draw()\n# tbl = tbl.replace('[rdy]', '['+termui.kGreen+'rdy'+termui.kReset+']')\n# tbl = tbl.replace('[bsy]', '['+termui.kRed+'bsy'+termui.kReset+']')\n# return tbl\n\n\n# # -----------------------------------------------------------------------------\n# def readStreamProcessorStatus(node, nproc):\n\n# row_names = [\n# 'upck >> hsc',\n# 'hsc >> psub',\n# 'psub >> fir ',\n# 'fir >> hf',\n# 'hf >> hsc',\n# 'hsc >> cr_if',\n# 'tpg >> mask',\n# 'mask >> filt'\n# ]\n\n# flag_map = collections.OrderedDict([\n# ('v', 'valid'),\n# ('u', 'user'),\n# ('l', 'last')\n# ])\n\n# strmSelNode = node.getNode('csr.ctrl.stream_sel')\n# strmCapNode = node.getNode('csr.ctrl.cap_ctrs')\n# strmCsrNode = node.getNode('stream_proc.csr')\n# strmCapNode.write(1)\n# strmCapNode.write(0)\n# strmCapNode.getClient().dispatch()\n\n# probes = {}\n# for i in range(nproc):\n# strmSelNode.write(i)\n# node.getClient().dispatch()\n# probes[i] = dumpSubRegs(strmCsrNode.getNode('mon'))\n\n# hdr = ['probe']+['{}'.format(k) for k in range(nproc)]\n# tpg_table = Texttable(max_width=0)\n# tpg_table.header(hdr)\n# tpg_table.set_deco(Texttable.HEADER | Texttable.BORDER | Texttable.VLINES)\n# tpg_table.set_chars(['-', '|', '+', '-'])\n# for k in range(8):\n# lbl = 'p'+str(k)\n\n# flags = ''.join([f for f, l in flag_map.items() if probes[i][lbl+'.'+l]])\n\n# row = [lbl+': '+row_names[k]]+['{} [{}] ({}) {}'.format(probes[i][lbl+'.pkt_ctr'], 'rdy' if probes[i][lbl+'.ready'] else 'bsy', flags, probes[i][lbl+'.last_err']) for i in range(4)]\n# tpg_table.add_row(row)\n# tbl = tpg_table.draw()\n# tbl = tbl.replace('[rdy]', '['+termui.kGreen+'rdy'+termui.kReset+']')\n# tbl = tbl.replace('[bsy]', '['+termui.kRed+'bsy'+termui.kReset+']')\n# return tbl\n\n\n# # 
-----------------------------------------------------------------------------\n# def readSinkStatus(node):\n\n# reg_map = collections.OrderedDict([\n# ('en', 'ctrl.en'),\n# ('fifo_mode', 'ctrl.fifo_mode'),\n# ('rdy_mode', 'ctrl.rdy_mode'),\n# ('empty', 'stat.empty'),\n# ('err', 'stat.err'),\n# ('full', 'stat.full'),\n# ('count', 'count')\n# ])\n\n# sinks = {}\n# for i in range(4):\n# sinks[i] = dumpSubRegs(node.getNode('sink'+str(i)+'.csr'))\n# sinks[i].update(dumpReg(node.getNode('sink'+str(i)+'.buf.count')))\n\n# hdr = ['reg']+['{}'.format(k) for k in range(4)]\n# sink_table = Texttable(max_width=0)\n# sink_table.header(hdr)\n# sink_table.set_deco(Texttable.HEADER | Texttable.BORDER | Texttable.VLINES)\n# sink_table.set_chars(['-', '|', '+', '-'])\n# for k, l in reg_map.items():\n# row = [k]+[sinks[i][l] for i in range(4)]\n# sink_table.add_row(row)\n\n# return sink_table.draw()\n\n\n# # ------------------------------------------------------------------------------\n# def printRegTable(aRegs, aHeader=True, aSort=True):\n# print(( formatRegTable(aRegs, aHeader, aSort) ))\n\n\n# # ------------------------------------------------------------------------------\n# def formatRegTable(aRegs, aHeader=True, aSort=True):\n\n# lRegTable = Texttable(max_width=0)\n# lRegTable.set_deco(Texttable.VLINES | Texttable.BORDER | Texttable.HEADER)\n# lRegTable.set_chars(['-', '|', '+', '-'])\n# if aHeader:\n# lRegTable.header( ['name', 'value'] )\n\n# lRegs = sorted(aRegs) if aSort else aRegs\n# for k in lRegs:\n# lRegTable.add_row( [str(k), hex(aRegs[k])] )\n\n# return lRegTable.draw()\n\n# # ------------------------------------------------------------------------------\n# def formatDictTable(aDict, aHeader=True, aSort=True, aFmtr=str):\n# lDictTable = Texttable(max_width=0)\n# lDictTable.set_deco(Texttable.VLINES | Texttable.BORDER | Texttable.HEADER)\n# lDictTable.set_chars(['-', '|', '+', '-'])\n# if aHeader:\n# lDictTable.header( ['name', 'value'] )\n\n# for k in (sorted(aDict) if aSort else aDict):\n# v = aDict[k]\n# lDictTable.add_row( [str(k), aFmtr(v) if aFmtr else v])\n\n# return lDictTable.draw()\n\n# # ------------------------------------------------------------------------------\n# def printDictTable(aDict, aHeader=True, aSort=True, aFmtr=None):\n# print(( formatDictTable(aDict, aHeader, aSort, aFmtr) ))\n\n\n# -----------------------------------------------------------------------------\ndef dump_sub_regs(node, names: list = None):\n\n if names is None:\n names = sorted(node.getNodes())\n regs = collections.OrderedDict()\n for i in names:\n regs[i] = node.getNode(i).read()\n node.getClient().dispatch()\n\n return {k: hex(v.value()) for k, v in regs.items()}\n\n# -----------------------------------------------------------------------------\ndef dump_reg(node):\n v = node.read()\n node.getClient().dispatch()\n return {node.getId(): v.value()}\n\n# -----------------------------------------------------------------------------\ndef dict_to_table( vals: dict, **kwargs):\n t = Table(**kwargs)\n t.add_column('name')\n t.add_column('value', style='green')\n for k,v in vals.items():\n t.add_row(k,str(v))\n\n return t\n\n# -----------------------------------------------------------------------------\ndef dict_to_hextable( vals: dict, **kwargs):\n t = Table(**kwargs)\n t.add_column('name')\n t.add_column('value', style='green')\n for k,v in vals.items():\n t.add_row(k,hex(v))\n\n return t\n\n# ------------------------------------------------------------------------------\ndef print_reg_table(aRegs, 
**kwargs):\n rprint( dict_to_hextable(aRegs, **kwargs) )\n\n\n# ------------------------------------------------------------------------------\ndef print_dict_table(aDict, **kwargs):\n rprint(dict_to_table(aDict, **kwargs) )\n\n# -----------------------------------------------------------------------------\ndef read_stream_processor_status(node, nproc, **kwargs):\n\n row_names = [\n 'upck >> hsc',\n 'hsc >> psub',\n 'psub >> fir ',\n 'fir >> hf',\n 'hf >> meta',\n 'meta >> hsc',\n 'hsc >> mask',\n 'mask >> filt',\n 'filt >> arb'\n ]\n\n flag_map = collections.OrderedDict([\n ('v', 'valid'),\n ('u', 'user'),\n ('l', 'last')\n ])\n\n strmSelNode = node.getNode('csr.ctrl.stream_sel')\n strmCapNode = node.getNode('csr.ctrl.cap_ctrs')\n strmCsrNode = node.getNode('stream_proc.csr')\n strmCapNode.write(1)\n strmCapNode.write(0)\n strmCapNode.getClient().dispatch()\n\n probes = {}\n for i in range(nproc):\n strmSelNode.write(i)\n node.getClient().dispatch()\n probes[i] = dumpSubRegs(strmCsrNode.getNode('mon'))\n\n\n hdr = ['probe']+[f'{k}' for k in range(nproc)]\n t = Table(*hdr, **kwargs)\n\n for k in range(9):\n lbl = f'p{k}'\n\n flags = ''.join([f for f, l in flag_map.items() if probes[i][lbl+'.'+l]])\n\n row = [f\"{lbl}: {row_names[k]}\"]+['{} [{}] ({}) {}'.format(probes[i][lbl+'.pkt_ctr'], '[green]rdy[/green]' if probes[i][lbl+'.ready'] else '[red]bsy[/red]', flags, probes[i][lbl+'.last_err']) for i in range(4)]\n t.add_row(*row)\n return t\n\n\n# ------------------------------------------------------------------------------\n\n\n# ------------------------------------------------------------------------------\ndef gen_range_validator(first, last):\n \"\"\"\n Utility function to generate validators for integer number lists with range check\n\n \"\"\"\n def validate_channels(ctx, param, value):\n return _validate_range_impl(value, first, last)\n\n return validate_channels\n\n# ------------------------------------------------------------------------------\ndef validate_link_ids(ctx, param, value):\n first, last = 0, ctx.obj.mConfigInfo['n_links']\n return _validate_range_impl(value, first, last)\n\n\n# ------------------------------------------------------------------------------\ndef validate_proc_ids(ctx, param, value):\n first, last = 0, ctx.obj.mConfigInfo['n_port']\n return _validate_range_impl(value, first, last)\n\n\n# ------------------------------------------------------------------------------\ndef validate_chan_ids(ctx, param, value):\n first, last = 0, ctx.obj.mConfigInfo['n_mux']\n return _validate_range_impl(value, first, last)\n\n\n# ------------------------------------------------------------------------------\ndef _validate_range_impl(value, first, last):\n if value is None:\n return None\n\n if value == 'all':\n return list(range(first, last))\n elif value == 'none':\n return []\n\n if not value[0].isdigit():\n raise click.ClickException('Malformed option (comma separated list expected): %s' % value)\n\n _sep = ','\n _dash = '-'\n\n numbers = []\n items = value.split(_sep)\n for item in items:\n nums = item.split(_dash)\n if len(nums) == 1:\n # single number\n numbers.append(int(item))\n elif len(nums) == 2:\n i = int(nums[0])\n j = int(nums[1])\n if i > j:\n raise click.ClickException('Invalid interval '+item)\n numbers.extend(list(range(i, j+1)))\n else:\n raise click.ClickException('Malformed option (comma separated list expected): %s' % value)\n\n out_of_range = [n for n in numbers if (n < first or n >= last)]\n if any(out_of_range):\n raise click.ClickException('Values 
out of range %s-%s: %s' % (first, last, out_of_range))\n\n return numbers\n", "repo_name": "DUNE-DAQ/dtpcontrols", "sub_path": "python/dtpcontrols/toolbox.py", "file_name": "toolbox.py", "file_ext": "py", "file_size_in_byte": 11633, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "85", "api": [{"api_name": "collections.OrderedDict", "line_number": 195, "usage_type": "call"}, {"api_name": "rich.table.Table", "line_number": 210, "usage_type": "call"}, {"api_name": "rich.table.Table", "line_number": 220, "usage_type": "call"}, {"api_name": "rich.print", "line_number": 230, "usage_type": "call"}, {"api_name": "rich.print", "line_number": 235, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 252, "usage_type": "call"}, {"api_name": "rich.table.Table", "line_number": 273, "usage_type": "call"}, {"api_name": "click.ClickException", "line_number": 328, "usage_type": "call"}, {"api_name": "click.ClickException", "line_number": 344, "usage_type": "call"}, {"api_name": "click.ClickException", "line_number": 347, "usage_type": "call"}, {"api_name": "click.ClickException", "line_number": 351, "usage_type": "call"}]}
+{"seq_id": "22666401843", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('questionnaire', '0001_initial'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Run',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('runid', models.CharField(max_length=32, null=True)),\n ],\n ),\n migrations.AddField(\n model_name='answer',\n name='run',\n field=models.ForeignKey(on_delete=models.CASCADE, related_name='answers', to='questionnaire.Run', null=True),\n ),\n migrations.AddField(\n model_name='runinfo',\n name='run',\n field=models.ForeignKey(on_delete=models.CASCADE, related_name='run_infos', to='questionnaire.Run', null=True),\n ),\n migrations.AddField(\n model_name='runinfohistory',\n name='run',\n field=models.ForeignKey(on_delete=models.CASCADE, related_name='run_info_histories', to='questionnaire.Run', null=True),\n ),\n ]\n", "repo_name": "Apeirogon-inc/survey-app", "sub_path": "core/migrations/0002_auto_20160929_1320.py", "file_name": "0002_auto_20160929_1320.py", "file_ext": "py", "file_size_in_byte": 1212, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "73", "api": [{"api_name": "django.db.migrations.Migration", "line_number": 7, "usage_type": "attribute"}, {"api_name": "django.db.migrations", "line_number": 7, "usage_type": "name"}, {"api_name": "django.db.migrations.CreateModel", "line_number": 14, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 14, "usage_type": "name"}, {"api_name": "django.db.models.AutoField", "line_number": 17, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 17, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 18, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 18, "usage_type": "name"}, {"api_name": "django.db.migrations.AddField", "line_number": 21, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 21, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 24, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 24, "usage_type": "name"}, {"api_name": "django.db.models.CASCADE", "line_number": 24, "usage_type": "attribute"}, {"api_name": "django.db.migrations.AddField", "line_number": 26, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 26, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 29, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 29, "usage_type": "name"}, {"api_name": "django.db.models.CASCADE", "line_number": 29, "usage_type": "attribute"}, {"api_name": "django.db.migrations.AddField", "line_number": 31, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 31, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 34, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 34, "usage_type": "name"}, {"api_name": "django.db.models.CASCADE", "line_number": 34, "usage_type": "attribute"}]}
+{"seq_id": "18241750250", "text": "import cv2\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport tensorflow as tf\nimport vtk\n\nfrom wavedata.tools.obj_detection import obj_utils\nfrom wavedata.tools.visualization import vis_utils\nfrom wavedata.tools.visualization.vtk_boxes import VtkBoxes\nfrom wavedata.tools.visualization.vtk_ground_plane import VtkGroundPlane\nfrom wavedata.tools.visualization.vtk_point_cloud import VtkPointCloud\n\nimport mlod\nfrom mlod.builders.dataset_builder import DatasetBuilder\nfrom mlod.core import box_3d_encoder\nfrom mlod.core.anchor_generators import grid_anchor_3d_generator\nfrom mlod.utils import demo_utils\n\n\ndef main():\n \"\"\"\n Visualization of the mini batch anchors for RpnModel training.\n\n Keys:\n F1: Toggle mini batch anchors\n F2: Toggle positive/negative proposal anchors\n F3: Toggle easy ground truth objects (Green)\n F4: Toggle medium ground truth objects (Orange)\n F5: Toggle hard ground truth objects (Red)\n F6: Toggle all ground truth objects (default off)\n F7: Toggle ground-plane\n \"\"\"\n\n anchor_colour_scheme = {\n \"Car\": (255, 0, 0), # Red\n \"Pedestrian\": (255, 150, 50), # Orange\n \"Cyclist\": (150, 50, 100), # Purple\n \"DontCare\": (255, 255, 255), # White\n\n \"Anchor\": (150, 150, 150), # Gray\n\n \"Positive\": (0, 255, 255), # Teal\n \"Negative\": (255, 0, 255) # Bright Purple\n }\n\n ##############################\n # Options\n ##############################\n show_orientations = True\n\n # Classes name\n config_name = 'car'\n # config_name = 'ped'\n # config_name = 'cyc'\n # config_name = 'ppl'\n\n # # # Random sample # # #\n sample_name = None\n\n # Small cars\n # sample_name = '000008'\n # sample_name = '000639'\n\n # # # Cars # # #\n # sample_name = \"000001\"\n # sample_name = \"000050\"\n # sample_name = \"000112\"\n # sample_name = \"000169\"\n # sample_name = \"000191\"\n\n # # # People # # #\n # sample_name = '000000'\n\n # val_half\n # sample_name = '000001' # Hard, 1 far cyc\n # sample_name = '000005' # Easy, 1 ped\n # sample_name = '000122' # Easy, 1 cyc\n # sample_name = '000134' # Hard, lots of people\n # sample_name = '000167' # Medium, 1 ped, 2 cycs\n # sample_name = '000187' # Medium, 1 ped on left\n # sample_name = '000381' # Easy, 1 ped\n # sample_name = '000398' # Easy, 1 ped\n # sample_name = '000401' # Hard, obscured peds\n # sample_name = '000407' # Easy, 1 ped\n sample_name = '000448' # Hard, several far people\n # sample_name = '000486' # Hard 2 obscured peds\n # sample_name = '000509' # Easy, 1 ped\n # sample_name = '000718' # Hard, lots of people\n # sample_name = '002216' # Easy, 1 cyc\n\n # sample_name = \"000000\"\n # sample_name = \"000011\"\n # sample_name = \"000015\"\n # sample_name = \"000028\"\n # sample_name = \"000035\"\n # sample_name = \"000134\"\n # sample_name = \"000167\"\n # sample_name = '000379'\n # sample_name = '000381'\n # sample_name = '000397'\n # sample_name = '000398'\n # sample_name = '000401'\n # sample_name = '000407'\n # sample_name = '000486'\n # sample_name = '000509'\n\n # # Cyclists # # #\n # sample_name = '000122'\n # sample_name = '000448'\n\n # # # Multiple classes # # #\n # sample_name = \"000764\"\n ##############################\n # End of Options\n ##############################\n\n # Dataset config\n dataset_config_path = mlod.top_dir() + \\\n '/demos/configs/mb_rpn_{}.config'.format(config_name)\n\n # Create Dataset\n dataset = DatasetBuilder.load_dataset_from_config(\n dataset_config_path)\n\n # Random sample\n if sample_name is None:\n 
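# no sample was specified above, so draw one at random from the dataset\n        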
sample_idx = np.random.randint(0, dataset.num_samples)\n sample_name = dataset.sample_list[sample_idx].name\n\n anchor_strides = dataset.kitti_utils.anchor_strides\n\n img_idx = int(sample_name)\n\n print(\"Showing mini batch for sample {}\".format(sample_name))\n\n image = cv2.imread(dataset.get_rgb_image_path(sample_name))\n image_shape = [image.shape[1], image.shape[0]]\n\n # KittiUtils class\n dataset_utils = dataset.kitti_utils\n\n ground_plane = obj_utils.get_road_plane(img_idx, dataset.planes_dir)\n\n point_cloud = obj_utils.get_depth_map_point_cloud(img_idx,\n dataset.calib_dir,\n dataset.depth_dir,\n image_shape)\n\n points = point_cloud.T\n point_colours = vis_utils.project_img_to_point_cloud(points, image,\n dataset.calib_dir,\n img_idx)\n\n clusters, _ = dataset.get_cluster_info()\n anchor_generator = grid_anchor_3d_generator.GridAnchor3dGenerator()\n\n # Read mini batch info\n anchors_info = dataset_utils.get_anchors_info(\n dataset.classes_name, anchor_strides, sample_name)\n\n if not anchors_info:\n # Exit early if anchors_info is empty\n print(\"Anchors info is empty, please try a different sample\")\n return\n\n # Generate anchors for all classes\n all_anchor_boxes_3d = []\n for class_idx in range(len(dataset.classes)):\n\n anchor_boxes_3d = anchor_generator.generate(\n area_3d=dataset.kitti_utils.area_extents,\n anchor_3d_sizes=clusters[class_idx],\n anchor_stride=anchor_strides[class_idx],\n ground_plane=ground_plane)\n\n all_anchor_boxes_3d.extend(anchor_boxes_3d)\n all_anchor_boxes_3d = np.asarray(all_anchor_boxes_3d)\n\n # Use anchors info\n indices, ious, offsets, classes = anchors_info\n\n # Get non empty anchors from the indices\n anchor_boxes_3d = all_anchor_boxes_3d[indices]\n\n # Sample an RPN mini batch from the non empty anchors\n mini_batch_utils = dataset.kitti_utils.mini_batch_utils\n mb_mask_tf, _ = mini_batch_utils.sample_rpn_mini_batch(ious)\n\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n sess = tf.Session(config=config)\n\n mb_mask = sess.run(mb_mask_tf)\n\n mb_anchor_boxes_3d = anchor_boxes_3d[mb_mask]\n mb_anchor_ious = ious[mb_mask]\n\n # ObjectLabel list that hold all boxes to visualize\n obj_list = []\n\n num_positives = 0\n # Convert the mini_batch anchors to object list\n mini_batch_size = mini_batch_utils.rpn_mini_batch_size\n for i in range(mini_batch_size):\n if mb_anchor_ious[i] > mini_batch_utils.rpn_pos_iou_range[0]:\n obj_type = \"Positive\"\n num_positives += 1\n else:\n obj_type = \"Negative\"\n\n obj = box_3d_encoder.box_3d_to_object_label(mb_anchor_boxes_3d[i],\n obj_type)\n obj_list.append(obj)\n\n print('Num positives', num_positives)\n\n # Convert all non-empty anchors to object list\n non_empty_anchor_objs = \\\n [box_3d_encoder.box_3d_to_object_label(\n anchor_box_3d, obj_type='Anchor')\n for anchor_box_3d in anchor_boxes_3d]\n\n ##############################\n # Ground Truth\n ##############################\n if dataset.has_labels:\n easy_gt_objs, medium_gt_objs, \\\n hard_gt_objs, all_gt_objs = demo_utils.get_gts_based_on_difficulty(\n dataset, img_idx)\n else:\n easy_gt_objs = medium_gt_objs = hard_gt_objs = all_gt_objs = []\n\n # Visualize 2D image\n vis_utils.visualization(dataset.rgb_image_dir, img_idx)\n plt.show(block=False)\n\n # Create VtkAxes\n axes = vtk.vtkAxesActor()\n axes.SetTotalLength(5, 5, 5)\n\n # Create VtkBoxes for mini batch anchors\n vtk_pos_anchor_boxes = VtkBoxes()\n vtk_pos_anchor_boxes.set_objects(obj_list, anchor_colour_scheme)\n\n # VtkBoxes for non empty anchors\n 
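# (these are drawn below with a thin 0.1 line width)\n    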
vtk_non_empty_anchors = VtkBoxes()\n vtk_non_empty_anchors.set_objects(non_empty_anchor_objs,\n anchor_colour_scheme)\n vtk_non_empty_anchors.set_line_width(0.1)\n\n # Create VtkBoxes for ground truth\n vtk_easy_gt_boxes, vtk_medium_gt_boxes, \\\n vtk_hard_gt_boxes, vtk_all_gt_boxes = \\\n demo_utils.create_gt_vtk_boxes(easy_gt_objs,\n medium_gt_objs,\n hard_gt_objs,\n all_gt_objs,\n show_orientations)\n\n vtk_point_cloud = VtkPointCloud()\n vtk_point_cloud.set_points(points, point_colours)\n vtk_point_cloud.vtk_actor.GetProperty().SetPointSize(2)\n\n vtk_ground_plane = VtkGroundPlane()\n vtk_ground_plane.set_plane(ground_plane, dataset.kitti_utils.bev_extents)\n\n # vtk_voxel_grid = VtkVoxelGrid()\n # vtk_voxel_grid.set_voxels(vx_grid)\n\n # Create Voxel Grid Renderer in bottom half\n vtk_renderer = vtk.vtkRenderer()\n vtk_renderer.AddActor(vtk_point_cloud.vtk_actor)\n vtk_renderer.AddActor(vtk_ground_plane.vtk_actor)\n\n vtk_renderer.AddActor(vtk_hard_gt_boxes.vtk_actor)\n vtk_renderer.AddActor(vtk_medium_gt_boxes.vtk_actor)\n vtk_renderer.AddActor(vtk_easy_gt_boxes.vtk_actor)\n vtk_renderer.AddActor(vtk_all_gt_boxes.vtk_actor)\n\n # vtk_renderer.AddActor(vtk_voxel_grid.vtk_actor)\n vtk_renderer.AddActor(vtk_non_empty_anchors.vtk_actor)\n vtk_renderer.AddActor(vtk_pos_anchor_boxes.vtk_actor)\n vtk_renderer.AddActor(axes)\n vtk_renderer.SetBackground(0.2, 0.3, 0.4)\n\n # Setup Camera\n current_cam = vtk_renderer.GetActiveCamera()\n current_cam.Pitch(160.0)\n current_cam.Roll(180.0)\n\n # Zooms out to fit all points on screen\n vtk_renderer.ResetCamera()\n\n # Zoom in slightly\n current_cam.Zoom(2.5)\n\n # Reset the clipping range to show all points\n vtk_renderer.ResetCameraClippingRange()\n\n # Setup Render Window\n vtk_render_window = vtk.vtkRenderWindow()\n mb_iou_thresholds = np.round(\n [mini_batch_utils.rpn_neg_iou_range[1],\n mini_batch_utils.rpn_pos_iou_range[0]], 3)\n vtk_render_window.SetWindowName(\n 'Sample {} RPN Mini Batch {}/{}, '\n 'Num Positives {}'.format(\n sample_name,\n mb_iou_thresholds[0],\n mb_iou_thresholds[1],\n num_positives))\n vtk_render_window.SetSize(900, 500)\n vtk_render_window.AddRenderer(vtk_renderer)\n\n # Setup custom interactor style, which handles mouse and key events\n vtk_render_window_interactor = vtk.vtkRenderWindowInteractor()\n vtk_render_window_interactor.SetRenderWindow(vtk_render_window)\n\n vtk_render_window_interactor.SetInteractorStyle(\n vis_utils.ToggleActorsInteractorStyle([\n vtk_non_empty_anchors.vtk_actor,\n vtk_pos_anchor_boxes.vtk_actor,\n\n vtk_easy_gt_boxes.vtk_actor,\n vtk_medium_gt_boxes.vtk_actor,\n vtk_hard_gt_boxes.vtk_actor,\n vtk_all_gt_boxes.vtk_actor,\n\n vtk_ground_plane.vtk_actor\n ]))\n\n # Render in VTK\n vtk_render_window.Render()\n vtk_render_window_interactor.Start()\n\n\nif __name__ == '__main__':\n main()\n", "repo_name": "JianDeng2018/MLOD", "sub_path": "demos/mini_batch_rpn_vis.py", "file_name": "mini_batch_rpn_vis.py", "file_ext": "py", "file_size_in_byte": 11122, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 21, "dataset": "github-code", "pt": "85", "api": [{"api_name": "mlod.top_dir", "line_number": 118, "usage_type": "call"}, {"api_name": "mlod.builders.dataset_builder.DatasetBuilder.load_dataset_from_config", "line_number": 122, "usage_type": "call"}, {"api_name": "mlod.builders.dataset_builder.DatasetBuilder", "line_number": 122, "usage_type": "name"}, {"api_name": "numpy.random.randint", "line_number": 127, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 127, 
"usage_type": "attribute"}, {"api_name": "cv2.imread", "line_number": 136, "usage_type": "call"}, {"api_name": "wavedata.tools.obj_detection.obj_utils.get_road_plane", "line_number": 142, "usage_type": "call"}, {"api_name": "wavedata.tools.obj_detection.obj_utils", "line_number": 142, "usage_type": "name"}, {"api_name": "wavedata.tools.obj_detection.obj_utils.get_depth_map_point_cloud", "line_number": 144, "usage_type": "call"}, {"api_name": "wavedata.tools.obj_detection.obj_utils", "line_number": 144, "usage_type": "name"}, {"api_name": "wavedata.tools.visualization.vis_utils.project_img_to_point_cloud", "line_number": 150, "usage_type": "call"}, {"api_name": "wavedata.tools.visualization.vis_utils", "line_number": 150, "usage_type": "name"}, {"api_name": "mlod.core.anchor_generators.grid_anchor_3d_generator.GridAnchor3dGenerator", "line_number": 155, "usage_type": "call"}, {"api_name": "mlod.core.anchor_generators.grid_anchor_3d_generator", "line_number": 155, "usage_type": "name"}, {"api_name": "numpy.asarray", "line_number": 177, "usage_type": "call"}, {"api_name": "tensorflow.ConfigProto", "line_number": 189, "usage_type": "call"}, {"api_name": "tensorflow.Session", "line_number": 191, "usage_type": "call"}, {"api_name": "mlod.core.box_3d_encoder.box_3d_to_object_label", "line_number": 211, "usage_type": "call"}, {"api_name": "mlod.core.box_3d_encoder", "line_number": 211, "usage_type": "name"}, {"api_name": "mlod.core.box_3d_encoder.box_3d_to_object_label", "line_number": 219, "usage_type": "call"}, {"api_name": "mlod.core.box_3d_encoder", "line_number": 219, "usage_type": "name"}, {"api_name": "mlod.utils.demo_utils.get_gts_based_on_difficulty", "line_number": 228, "usage_type": "call"}, {"api_name": "mlod.utils.demo_utils", "line_number": 228, "usage_type": "name"}, {"api_name": "wavedata.tools.visualization.vis_utils.visualization", "line_number": 234, "usage_type": "call"}, {"api_name": "wavedata.tools.visualization.vis_utils", "line_number": 234, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 235, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 235, "usage_type": "name"}, {"api_name": "vtk.vtkAxesActor", "line_number": 238, "usage_type": "call"}, {"api_name": "wavedata.tools.visualization.vtk_boxes.VtkBoxes", "line_number": 242, "usage_type": "call"}, {"api_name": "wavedata.tools.visualization.vtk_boxes.VtkBoxes", "line_number": 246, "usage_type": "call"}, {"api_name": "mlod.utils.demo_utils.create_gt_vtk_boxes", "line_number": 254, "usage_type": "call"}, {"api_name": "mlod.utils.demo_utils", "line_number": 254, "usage_type": "name"}, {"api_name": "wavedata.tools.visualization.vtk_point_cloud.VtkPointCloud", "line_number": 260, "usage_type": "call"}, {"api_name": "wavedata.tools.visualization.vtk_ground_plane.VtkGroundPlane", "line_number": 264, "usage_type": "call"}, {"api_name": "vtk.vtkRenderer", "line_number": 271, "usage_type": "call"}, {"api_name": "vtk.vtkRenderWindow", "line_number": 301, "usage_type": "call"}, {"api_name": "numpy.round", "line_number": 302, "usage_type": "call"}, {"api_name": "vtk.vtkRenderWindowInteractor", "line_number": 316, "usage_type": "call"}, {"api_name": "wavedata.tools.visualization.vis_utils.ToggleActorsInteractorStyle", "line_number": 320, "usage_type": "call"}, {"api_name": "wavedata.tools.visualization.vis_utils", "line_number": 320, "usage_type": "name"}]}
+{"seq_id": "9809821377", "text": "\"\"\"\nAuthor: Sreekanth Palagiri\nfile:\nRename this file to TSP_x.py where x is your student number \n\"\"\"\nimport os\nimport sys \nimport random\nimport string\nimport logging\nfrom datetime import datetime\nimport numpy as np\nfrom prolibs.plotmodule import plotfit\nfrom matplotlib.lines import Line2D\nfrom prolibs.Individual import Individual\nimport statistics\nimport csv\n\n# set system path\ndirectory = os.getcwd()\nsys.path.insert(0,directory+\"/prolibs\")\nsys.path.insert(0,directory+\"/TSPdataset\")\nsys.path.insert(0,directory+\"/files\")\n\nlogging.basicConfig(level=logging.ERROR, filename=directory+\"/files/\"+\"logfile.txt\",\n format=\"%(message)s\")\n\n# seed program for consistent results with student id\nmyStudentNum = 184198 \nrandom.seed(myStudentNum)\n\nconfig = {1:['random','uniformCrossover','inversionMutation','randomSelection'],\n 2:['random','pmxCrossover','reciprocalExchangeMutation','randomSelection'],\n 3:['random','uniformCrossover','reciprocalExchangeMutation','stochasticUniversalSampling'],\n 4:['random','pmxCrossover','reciprocalExchangeMutation','stochasticUniversalSampling'],\n 5:['random','pmxCrossover','inversionMutation','stochasticUniversalSampling'],\n 6:['random','uniformCrossover','inversionMutation','stochasticUniversalSampling'],\n 7:['NearestNeighbour','pmxCrossover','inversionMutation','stochasticUniversalSampling'],\n 8:['NearestNeighbour','uniformCrossover','inversionMutation','stochasticUniversalSampling']\n }\n\n#variables for plotting\niteration = []\ntimeperiter=[]\nbestdistance=[]\nbestfitness=[]\naveragefitness=[]\nminfitness=[]\nmaxfitness=[]\nmedianfitness=[]\n\n\nclass BasicTSP:\n def __init__(self, _fName, _popSize, _mutationRate, _maxIterations):\n \"\"\"\n Parameters and general variables\n \"\"\"\n self.population = []\n self.matingPool = []\n self.best = None\n self.popSize = _popSize\n self.genSize = None\n self.mutationRate = _mutationRate\n self.maxIterations = _maxIterations\n self.iteration = 0\n self.now = datetime.now()\n self.fName = _fName\n self.data = {}\n\n self.readInstance()\n self.initPopulation()\n\n def readInstance(self):\n \"\"\"\n Reading an instance from fName\n \"\"\"\n file = open(directory+'//TSPdataset//'+self.fName, 'r')\n self.genSize = int(file.readline())\n self.data = {}\n for line in file:\n (id, x, y) = line.split()\n self.data[int(id)] = (int(x), int(y))\n file.close()\n \n def initPopulation(self):\n \"\"\"\n Creating random individuals in the population\n \"\"\"\n for i in range(0, self.popSize):\n individual = Individual(self.genSize, self.data,config[confignum][0])\n individual.computeFitness()\n self.population.append(individual)\n\n self.best = self.population[0].copy()\n for ind_i in self.population:\n if self.best.getFitness() > ind_i.getFitness():\n self.best = ind_i.copy()\n logging.error(ind_i.genes)\n print (\"Best initial sol: Distance\",self.best.getDistance(),' Fitness:', self.best.getFitness())\n\n def updateBest(self, candidate):\n if self.best == None or candidate.getFitness() > self.best.getFitness():\n self.best = candidate.copy()\n print('Best Fitness:',candidate.getFitness(),' Iteration:',self.iteration)\n \n\n def randomparentSelection(self):\n \"\"\"\n Random (uniform) selection of two individuals\n \"\"\"\n indA = self.matingPool[ random.randint(0, self.popSize-1) ]\n indB = self.matingPool[ random.randint(0, self.popSize-1) ]\n return [indA, indB]\n\n def stochasticUniversalSampling(self):\n \"\"\"\n Your stochastic universal 
sampling Selection Implementation\n \"\"\"\n logging.info('Population before stochastic selection at iteration:'+ str(self.iteration))\n popfit=[] # Use to log population fitness in log, not used in logic \n for i in range(self.popSize):\n logging.info(self.population[i].genes) # logging info for validation \n popfit.append(self.population[i].fitness)\n logging.info(popfit)\n\n self.matingPool = []\n f=sum(i.fitness for i in self.population) #Sum of fitness values\n p=f/self.popSize # Distance between successive points\n rn = random.uniform(0,p) #generating random number rn as starting point\n logging.info('f:'+str(f)+' p:'+str(p)+' rn:'+str(rn)+' popsize:'+str(self.popSize)) # logging info for validation\n \"\"\"\n creating N pointers at rn+p*1, rn+p*2, ..., rn+p*N where p is the distance between points \n and N is popsize. We are considering population size as no. of parents \n \"\"\"\n pointers =[ rn + i*p for i in range(self.popSize) ]\n logging.info('pointers:')# logging info for validation\n logging.info(pointers)# logging info for validation\n \"\"\"Pseudo code:\n For each pointer i in pointers, while fitness sum of Population[0..j] <= pointer i\n j++\n add Population[j] to matingpool. \n \"\"\"\n for i in range(len(pointers)):\n j,fitsubtotal=0,self.population[0].fitness\n while (fitsubtotal <= pointers[i]):\n j+=1\n fitsubtotal+=self.population[j].fitness\n self.matingPool.append(self.population[j].copy())\n logging.info('Population After stochastic selection (selection of mating pool):')\n for i in range(len(self.matingPool)):\n logging.info(self.matingPool[i].genes) # logging info for validation \n \n def uniformCrossover(self, indA, indB):\n \"\"\"\n Uniform Crossover Implementation\n \"\"\"\n selector = [random.randint(0, 1) for i in range(self.genSize)]#1 position doesn't change, 0 position changes\n selector = ['' if a==0 else 1 for a in selector] #replace 0s with empty strings\n A=[a if b!= '' else '' for a,b in zip(indA.genes,selector)] #make a new child with spaces where genes can be replaced\n B=[a if b!= '' else '' for a,b in zip(indB.genes,selector)] #make a new child with spaces where genes can be replaced\n #iterate through parent, if gene is not present in child add it at first empty space\n for i in indB.genes:\n if i in A:\n pass\n else:\n for j in range(len(A)):\n if A[j] == '':\n A[j]=i\n break\n for i in indA.genes:\n if i in B:\n pass\n else:\n for j in range(len(B)):\n if B[j] == '':\n B[j]=i\n break\n indA.genes,indB.genes= A,B\n\n def pmxCrossover(self, indA, indB):\n \"\"\"\n PMX Crossover Implementation\n \"\"\"\n indexA = random.randint(0, self.genSize-1)\n indexB = random.randint(0, self.genSize-1)\n A = ['']* min(indexA,indexB) + indA.genes[min(indexA,indexB):max(indexA,indexB)] + [''] * (self.genSize - max(indexA,indexB))\n B = ['']* min(indexA,indexB) + indB.genes[min(indexA,indexB):max(indexA,indexB)] + [''] * (self.genSize - max(indexA,indexB))\n for a, b in zip(indA.genes[min(indexA,indexB):max(indexA,indexB)],indB.genes[min(indexA,indexB):max(indexA,indexB)]):\n if b not in A:\n x=indB.genes.index(a)\n while A[x] !='':\n x=indB.genes.index(A[x])\n A[x]=b \n if a not in B:\n y=indA.genes.index(b)\n while B[y] !='':\n y=indA.genes.index(B[y])\n B[y]=a \n for i in range(self.genSize):\n if A[i] == '':\n A[i] = indB.genes[i]\n if B[i] == '':\n B[i] = indA.genes[i]\n\n indA.genes=A\n indB.genes=B\n\n def reciprocalExchangeMutation(self, ind):\n \"\"\"\n Your Reciprocal Exchange Mutation implementation\n \"\"\"\n if random.random() > self.mutationRate:\n return\n \n 
indexA = random.randint(0, self.genSize-1)\n indexB = random.randint(0, self.genSize-1)\n\n tmp = ind.genes[indexA]\n ind.genes[indexA] = ind.genes[indexB]\n ind.genes[indexB] = tmp\n \n def inversionMutation(self, ind):\n \"\"\"\n Inversion Mutation implementation\n \"\"\"\n if random.random() > self.mutationRate:\n return\n indexA = random.randint(0, self.genSize-1)\n indexB = random.randint(0, self.genSize-1)\n tmp=ind.genes[min(indexA,indexB):max(indexA,indexB)]\n tmp.reverse()\n ind.genes[min(indexA,indexB):max(indexA,indexB)]=tmp\n \n def crossover(self, indA, indB):\n \"\"\"\n Executes an order-1 crossover and returns a new individual\n \"\"\"\n child = []\n tmp = {}\n\n indexA = random.randint(0, self.genSize-1)\n indexB = random.randint(0, self.genSize-1)\n\n for i in range(0, self.genSize):\n if i >= min(indexA, indexB) and i <= max(indexA, indexB):\n tmp[indA.genes[i]] = False\n else:\n tmp[indA.genes[i]] = True\n aux = []\n for i in range(0, self.genSize):\n if not tmp[indB.genes[i]]:\n child.append(indB.genes[i])\n else:\n aux.append(indB.genes[i])\n child += aux\n return child\n\n def mutation(self, ind):\n \"\"\"\n Mutate an individual by swapping two cities with a certain probability (i.e., the mutation rate)\n \"\"\"\n if random.random() > self.mutationRate:\n return\n indexA = random.randint(0, self.genSize-1)\n indexB = random.randint(0, self.genSize-1)\n\n tmp = ind.genes[indexA]\n ind.genes[indexA] = ind.genes[indexB]\n ind.genes[indexB] = tmp\n\n ind.computeFitness()\n self.updateBest(ind)\n\n def randomSelection(self):\n \"\"\"\n Updating the mating pool before creating a new generation\n \"\"\"\n self.matingPool = []\n for ind_i in self.population:\n self.matingPool.append( ind_i.copy() )\n \n def newGeneration(self):\n for i in range(0, round(self.popSize/2)):\n \"\"\"\n Depending on your experiment you need to use the most suitable algorithms for:\n 1. Select two candidates\n 2. Apply Crossover\n 3. Apply Mutation\n \"\"\"\n indA, indB = self.randomparentSelection()\n logging.info('Randomly selected parents for mating:'+ str(indA.genes) + str(indB.genes))\n getattr(ga,config[confignum][1])(indA, indB)\n logging.info('After '+ config[confignum][1] +':'+ str(indA.genes) + str(indB.genes))\n getattr(ga,config[confignum][2])(indA)\n logging.info('After Parent 1 '+ config[confignum][2] +':'+ str(indA.genes) + str(indB.genes))\n getattr(ga,config[confignum][2])(indB)\n logging.info('After Parent 2 '+ config[confignum][2] +':'+ str(indA.genes) + str(indB.genes))\n \n print (\"iteration: \",self.iteration, \"Best Distance: \",self.best.getDistance(),\"Best Fitness: \",self.best.getFitness())\n logging.info (\"iteration: \"+ str(self.iteration)+\" best distance:\"+ str(self.best.getDistance())+\" best Fitness:\"+str(self.best.getFitness()))\n logging.info (\"Gene with best Fitness:\"+ str(self.best.genes))\n \n def GAStep(self):\n \"\"\"\n One step in the GA main algorithm\n 1. Updating mating pool with current population\n 2. 
Creating a new Generation\n \"\"\"\n getattr(ga,config[confignum][3])()\n self.newGeneration()\n \n #updating fitness of new generation and capturing statistics \n iteration.append(self.iteration)\n fitness =[]\n for ind in self.matingPool:\n ind.computeFitness()\n self.updateBest(ind)\n fitness.append(ind.getFitness())\n self.population=self.matingPool\n \n bestdistance.append(self.best.getDistance())\n bestfitness.append(self.best.getFitness())\n averagefitness.append(statistics.mean(fitness))\n minfitness.append(min(fitness))\n maxfitness.append(max(fitness))\n medianfitness.append(statistics.median(fitness))\n timetaken = datetime.now() - self.now\n timeperiter.append(timetaken.microseconds/1000)\n self.now = datetime.now()\n \n def search(self):\n \"\"\"\n General search template.\n Iterates for a given number of steps\n \"\"\"\n self.iteration = 0\n while self.iteration < self.maxIterations:\n self.GAStep()\n self.iteration += 1\n print (\"Total iterations: \",self.iteration)\n print (\"Best Solution: Distance - \",self.best.getDistance(),' Fitness:', self.best.getFitness())\n print(\"Gene with best Fitness:\", str(self.best.genes))\n with open(directory+\"/files/\"+\"configstats.csv\", mode='a') as stats_file:\n stats_file_writer = csv.writer(stats_file)\n stats_file_writer.writerow(['Configuration '+str(confignum),self.iteration,self.best.getFitness(),self.best.getDistance(),statistics.mean(timeperiter),(sum(timeperiter)/1000)])\n stats_file.close()\n\nif len(sys.argv) < 2:\n print (\"Error - Incorrect input\")\n print (\"Expecting python BasicTSP.py [instance] \")\n sys.exit(0)\n\nproblem_file = sys.argv[1]\n\nprint('Please choose configuration from below:')\nfor i in config:\n print(i,':',config[i])\nconfignum = int(input())\n\nif confignum < 1 or confignum > 8 :\n print (\"Error - Incorrect input for config\")\n sys.exit(0)\n\nga = BasicTSP(problem_file, 300, 0.1, 500)\nga.search()\nfilename = directory+\"/files/\"+'Configuration '+str(confignum)\nplotfit(iteration,bestfitness,averagefitness,medianfitness,minfitness,maxfitness,filename,'Configuration '+str(confignum))\n", "repo_name": "sreekanthpalagiri/geneticalgorithms-tsp", "sub_path": "tsp_R00184198.py", "file_name": "tsp_R00184198.py", "file_ext": "py", "file_size_in_byte": 14225, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "73", "api": [{"api_name": "os.getcwd", "line_number": 20, "usage_type": "call"}, {"api_name": "sys.path.insert", "line_number": 21, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 21, "usage_type": "attribute"}, {"api_name": "sys.path.insert", "line_number": 22, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 22, "usage_type": "attribute"}, {"api_name": "sys.path.insert", "line_number": 23, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 23, "usage_type": "attribute"}, {"api_name": "logging.basicConfig", "line_number": 25, "usage_type": "call"}, {"api_name": "logging.ERROR", "line_number": 25, "usage_type": "attribute"}, {"api_name": "random.seed", "line_number": 30, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 66, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 66, "usage_type": "name"}, {"api_name": "prolibs.Individual.Individual", "line_number": 90, "usage_type": "call"}, {"api_name": "logging.error", "line_number": 98, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 111, "usage_type": "call"}, {"api_name": 
"random.randint", "line_number": 112, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 119, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 122, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 124, "usage_type": "call"}, {"api_name": "random.uniform", "line_number": 129, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 130, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 136, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 137, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 149, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 151, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 157, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 184, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 185, "usage_type": "call"}, {"api_name": "random.random", "line_number": 212, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 215, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 216, "usage_type": "call"}, {"api_name": "random.random", "line_number": 226, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 228, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 229, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 241, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 242, "usage_type": "call"}, {"api_name": "random.random", "line_number": 262, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 264, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 265, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 291, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 293, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 295, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 297, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 300, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 301, "usage_type": "call"}, {"api_name": "statistics.mean", "line_number": 323, "usage_type": "call"}, {"api_name": "statistics.median", "line_number": 326, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 327, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 327, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 329, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 329, "usage_type": "name"}, {"api_name": "csv.writer", "line_number": 344, "usage_type": "call"}, {"api_name": "statistics.mean", "line_number": 345, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 348, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 351, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 353, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 362, "usage_type": "call"}, {"api_name": "prolibs.plotmodule.plotfit", "line_number": 367, "usage_type": "call"}]}
+{"seq_id": "41122811953", "text": "import os\nimport pathlib\nfrom langchain import PromptTemplate\nfrom langchain.embeddings import HuggingFaceEmbeddings\nfrom dotenv import load_dotenv\nfrom langchain.vectorstores import Milvus\nimport pandas as pd\nfrom src.utils.app_util import get_bam_creds, get_numeric_value, get_states\nfrom src.utils.data_util import loadData\nfrom langchain.chains import RetrievalQA\nfrom dotenv import load_dotenv\nfrom genai.schemas import GenerateParams\nfrom genai.extensions.langchain import LangChainInterface\n\n\nPATH = pathlib.Path(__file__).parent.resolve()\nDATA_DIR_PATH = str(PATH) + os.sep + \"data\" + os.sep\nload_dotenv()\n\n\ndef main():\n milvus_host = os.getenv(\"MILVUS_HOST\", None)\n milvus_port = os.getenv(\"MILVUS_PORT\", None)\n\n model_id = \"google/flan-t5-xxl\"\n\n creds = get_bam_creds()\n\n params = GenerateParams(\n decoding_method=\"greedy\",\n max_new_tokens=1000,\n min_new_tokens=10,\n repetition_penalty=1.4)\n\n llm = LangChainInterface(\n model=model_id,\n credentials=creds,\n params=params,\n\n )\n\n docs = []\n docs.extend(loadData(DATA_DIR_PATH, 'state_ut_water_scarcity_original'))\n docs.extend(loadData(DATA_DIR_PATH, 'state_ut_ground_water_original'))\n docs.extend(\n loadData(DATA_DIR_PATH, 'state_ut_water_conservation_harvesting_original'))\n\n print('Data added to Milvus Collection')\n\n embeddings = HuggingFaceEmbeddings(model_name=\"all-MiniLM-L6-v2\")\n vectordb = Milvus.from_documents(\n docs,\n embeddings,\n connection_args={\"host\": milvus_host, \"port\": milvus_port},\n drop_old=True\n )\n\n prompt_template = \"\"\"\n try to answer the question in 1 word and keep answer concise as possible. Don't add any random characters\n *********\n {context}\n *********\n \n {question}\n\n Helpful Answer:\n \"\"\"\n PROMPT = PromptTemplate(\n template=prompt_template, input_variables=[\"context\", \"question\"]\n )\n\n retriever = vectordb.as_retriever(\n search_type=\"mmr\", search_kwargs={\"k\": 5})\n qa_chain = RetrievalQA.from_chain_type(\n llm=llm, chain_type=\"stuff\", retriever=retriever, return_source_documents=True,\n chain_type_kwargs={\"prompt\": PROMPT}\n )\n\n finalOutput = []\n states = get_states()\n\n for state in states:\n quest = \"What is total water harvesting in million litres for \"+state\n result = qa_chain({\"query\": quest})\n output = get_numeric_value(result['result'])\n\n quest1 = \"What is total water usage in million litres for \"+state\n result1 = qa_chain({\"query\": quest1})\n output1 = get_numeric_value(result1['result'])\n\n quest2 = \"What is total water consumption in million litres for \"+state\n result2 = qa_chain({\"query\": quest2})\n output2 = get_numeric_value(result2['result'])\n\n quest3 = \"Is there water Scarcity in \"+state\n result3 = qa_chain({\"query\": quest3})\n output3 = 'no'\n if \"yes\" in result3['result'].lower():\n output3 = \"yes\"\n\n finalOutput.append(\n {\"state\": state,\n \"Water Harvesting in million litres\": output,\n \"Water Usage in million litres\": output1,\n \"Water Consumption in million litres\": output2,\n \"Water Scarcity\": output3})\n print('\\n')\n print('\\n')\n print(finalOutput)\n print('\\n')\n print('\\n')\n\n df=pd.DataFrame(finalOutput)\n df.to_excel(DATA_DIR_PATH+\"output_by_watson_ai.xlsx\", index=False)\n\n\nmain()\n", "repo_name": "tvelingkar/Aqua-Rescue", "sub_path": "insights/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 3520, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": 
"github-code", "pt": "73", "api": [{"api_name": "pathlib.Path", "line_number": 16, "usage_type": "call"}, {"api_name": "os.sep", "line_number": 17, "usage_type": "attribute"}, {"api_name": "dotenv.load_dotenv", "line_number": 18, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 22, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 23, "usage_type": "call"}, {"api_name": "src.utils.app_util.get_bam_creds", "line_number": 27, "usage_type": "call"}, {"api_name": "genai.schemas.GenerateParams", "line_number": 29, "usage_type": "call"}, {"api_name": "genai.extensions.langchain.LangChainInterface", "line_number": 35, "usage_type": "call"}, {"api_name": "src.utils.data_util.loadData", "line_number": 43, "usage_type": "call"}, {"api_name": "src.utils.data_util.loadData", "line_number": 44, "usage_type": "call"}, {"api_name": "src.utils.data_util.loadData", "line_number": 46, "usage_type": "call"}, {"api_name": "langchain.embeddings.HuggingFaceEmbeddings", "line_number": 50, "usage_type": "call"}, {"api_name": "langchain.vectorstores.Milvus.from_documents", "line_number": 51, "usage_type": "call"}, {"api_name": "langchain.vectorstores.Milvus", "line_number": 51, "usage_type": "name"}, {"api_name": "langchain.PromptTemplate", "line_number": 68, "usage_type": "call"}, {"api_name": "langchain.chains.RetrievalQA.from_chain_type", "line_number": 74, "usage_type": "call"}, {"api_name": "langchain.chains.RetrievalQA", "line_number": 74, "usage_type": "name"}, {"api_name": "src.utils.app_util.get_states", "line_number": 80, "usage_type": "call"}, {"api_name": "src.utils.app_util.get_numeric_value", "line_number": 85, "usage_type": "call"}, {"api_name": "src.utils.app_util.get_numeric_value", "line_number": 89, "usage_type": "call"}, {"api_name": "src.utils.app_util.get_numeric_value", "line_number": 93, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 113, "usage_type": "call"}]}
+{"seq_id": "29845194079", "text": "#coding:utf8\n#\n# Run this code to get the final results reported in our ijcai paper.\nfrom io import open\nimport string\nimport re\nimport random\nimport torch\nimport torch.nn as nn\nimport numpy as np\nfrom torch.autograd import Variable\nfrom torch import optim\nimport torch.nn.functional as F\nimport pprint,copy\nuse_cuda = torch.cuda.is_available()\nfrom gnn_with_args import *\nfrom event_chain import EventChain\nimport warnings\nwarnings.filterwarnings(\"ignore\", category=UserWarning)\n\ndef get_event_chains(event_list):\n return ['%s_%s' % (ev[0],ev[2]) for ev in event_list]\n\ndef get_word_embedding(word,word_id,id_vec,emb_size):\n if word in word_id:\n return id_vec[word_id[word]]\n else:\n return np.zeros(emb_size,dtype=np.float32)\n\ndef get_vec_rep(questions,word_id,id_vec,emb_size,predict=False):\n rep = np.zeros((5*len(questions),9,emb_size),dtype=np.float32)\n correct_answers=[]\n for i,q in enumerate(questions):\n context_chain=get_event_chains(q[0])\n choice_chain=get_event_chains(q[1])\n correct_answers.append(q[2])\n for j,context in enumerate(context_chain):\n context_vec=get_word_embedding(context,word_id,id_vec,emb_size)\n rep[5*i:5*(i+1),j,:]=context_vec\n for k,choice in enumerate(choice_chain):\n choice_vec=get_word_embedding(choice,word_id,id_vec,emb_size)\n rep[5*i+k,-1,:]=choice_vec\n if not predict:\n input_data=Variable(torch.from_numpy(rep))\n else:\n input_data=Variable(torch.from_numpy(rep),volatile=True)\n correct_answers = Variable(torch.from_numpy(np.array(correct_answers)))\n return input_data,correct_answers\n\n\nclass Word2VecAttention(nn.Module):\n def __init__(self):\n super(Word2VecAttention, self).__init__()\n self.linear_u_one=nn.Linear(HIDDEN_DIM,1,bias=False)\n self.linear_u_one2=nn.Linear(HIDDEN_DIM,1,bias=False)\n self.linear_u_two=nn.Linear(HIDDEN_DIM,1,bias=True)\n self.linear_u_two2=nn.Linear(HIDDEN_DIM,1,bias=False)\n self.sigmoid=nn.Sigmoid()\n self.tanh=nn.Tanh()\n\n def compute_scores(self,input_data): \n weight=Variable(torch.zeros((len(input_data),8,1)).fill_(1./8))\n weighted_input=torch.mul(input_data[:,0:8,:],weight) \n a=torch.sum(weighted_input,1)\n b=input_data[:,8,:]/8.0\n scores=-torch.norm(a-b, 2, 1).view(-1,5)\n return scores\n\n def forward(self, input_data):\n return self.compute_scores(input_data)\n\n def correct_answer_position(self,L,correct_answers):\n num_correct1 = torch.sum((L[:,0] == correct_answers).type(torch.FloatTensor))\n num_correct2 = torch.sum((L[:,1] == correct_answers).type(torch.FloatTensor))\n num_correct3 = torch.sum((L[:,2] == correct_answers).type(torch.FloatTensor))\n num_correct4 = torch.sum((L[:,3] == correct_answers).type(torch.FloatTensor))\n num_correct5 = torch.sum((L[:,4] == correct_answers).type(torch.FloatTensor))\n print (\"%d / %d 1st max correct: %f\" % (num_correct1.data[0], len(correct_answers),num_correct1 / len(correct_answers) * 100.))\n print (\"%d / %d 2ed max correct: %f\" % (num_correct2.data[0], len(correct_answers),num_correct2 / len(correct_answers) * 100.))\n print (\"%d / %d 3rd max correct: %f\" % (num_correct3.data[0], len(correct_answers),num_correct3 / len(correct_answers) * 100.))\n print (\"%d / %d 4th max correct: %f\" % (num_correct4.data[0], len(correct_answers),num_correct4 / len(correct_answers) * 100.))\n print (\"%d / %d 5th max correct: %f\" % (num_correct5.data[0], len(correct_answers),num_correct5 / len(correct_answers) * 100.))\n\n def predict(self, input_data, targets):\n scores=self.forward(input_data)\n sorted, L 
= torch.sort(scores,descending=True)\n self.correct_answer_position(L,targets)\n selections=L[:,0]\n pickle.dump((selections != targets),open('../data/test.answer','wb'))\n num_correct = torch.sum((selections == targets).type(torch.FloatTensor))\n accuracy = num_correct / len(targets) *100.0 \n return accuracy\n\n def weights_init(self,m):\n if isinstance(m, nn.Embedding):\n nn.init.xavier_uniform(m.weight)\n elif isinstance(m, nn.GRU):\n nn.init.xavier_uniform(m.weight_hh_l0)\n nn.init.xavier_uniform(m.weight_ih_l0)\n nn.init.constant(m.bias_hh_l0,0)\n nn.init.constant(m.bias_ih_l0,0)\n elif isinstance(m, nn.Linear):\n nn.init.xavier_uniform(m.weight)\n # nn.init.uniform(m.weight)\n # nn.init.normal(m.weight)\n\ndef train(questions):\n model=Word2VecAttention()\n input_data_test,correct_answers_test=get_vec_rep(questions,word_id,id_vec,HIDDEN_DIM,predict=True)\n accuracy=model.predict(input_data_test,correct_answers_test)\n print('Test Acc: ',accuracy.data[0])\n\n\ndef process_test(scores,test_index):\n for index in test_index:\n scores[index]=np.min(scores)\n return scores\n\ndef get_acc(scores,correct_answers,name='scores',save=False):\n selections = np.argmax(scores, axis=1)\n num_correct = int(np.sum(selections == correct_answers))\n if save:\n pickle.dump((selections == correct_answers),open('./scores/'+name,'wb'),2)\n samples = len(correct_answers)\n accuracy = float(num_correct) / samples * 100.\n # print (\"%d / %d correct: %f\" % (num_correct, samples, accuracy))\n return accuracy\n\nif __name__ == '__main__':\n test_index=pickle.load(open('../data/test_index.pickle','rb'))\n\n HIDDEN_DIM = 128\n L2_penalty=0.00001\n MARGIN=0.015\n LR=0.0001\n T=1\n BATCH_SIZE=50\n EPOCHES=520\n PATIENTS=300\n test_data=Data_data(pickle.load(open('../data/corpus_index_test_with_args_all_chain.data','rb')))\n word_id,id_vec,word_vec=get_hash_for_word('/users3/zyli/github/OpenNE/output/verb_net/1_property/deepwalk_128_unweighted_with_args.txt',verb_net3_mapping_with_args)\n \n HIDDEN_DIM = 128*4\n L2_penalty=0.00001\n MARGIN=0.015\n LR=0.0001\n T=1\n BATCH_SIZE=1000\n EPOCHES=520\n PATIENTS=300\n test_data=Data_data(pickle.load(open('../data/corpus_index_test_with_args_all_chain.data','rb')))\n model=trans_to_cuda(EventGraph_With_Args(len(word_vec),HIDDEN_DIM,word_vec,L2_penalty,MARGIN,LR,T))\n model.load_state_dict(torch.load('../data/gnn_euclid_acc_52.380001068115234_.model'))\n\n data=test_data.all_data()\n correct_answers=data[2].cpu().data.numpy()\n scores1=model(data[1],data[0]).cpu().data.numpy() \n scores1=process_test(scores1,test_index)\n print (get_acc(scores1,correct_answers,'scores1'))\n\n HIDDEN_DIM = 128*4\n L2_penalty=0.00001\n MARGIN=0.015\n LR=0.0001\n T=1\n BATCH_SIZE=1000\n EPOCHES=520\n PATIENTS=300\n test_data=Data_data(pickle.load(open('../data/corpus_index_test_with_args_all.data','rb')))\n data=test_data.all_data()\n model=trans_to_cuda(EventChain(embedding_dim=HIDDEN_DIM,hidden_dim=HIDDEN_DIM,vocab_size=len(word_vec),word_vec=word_vec,num_layers=1,bidirectional=False))\n model.load_state_dict(torch.load('../data/event_chain_acc_50.98999786376953_.model'))\n accuracy,accuracy1,accuracy2,accuracy3,accuracy4,scores2=model.predict_with_minibatch(data[1],data[2])\n scores2=scores2.cpu().data.numpy() \n scores2=process_test(scores2,test_index)\n print (get_acc(scores2,correct_answers,'scores2'))\n\n scores3=pickle.load(open('../data/event_comp_test.scores','rb'),encoding='bytes')\n scores3=process_test(scores3,test_index)\n print 
(get_acc(scores3,correct_answers,'scores3'))\n\n\n scores1=preprocessing.scale(scores1)\n scores2=preprocessing.scale(scores2)\n scores3=preprocessing.scale(scores3)\n\n best_acc=0. \n best_i_j_k=(0,0)\n for i in np.arange(-3,3,0.1):\n for j in np.arange(-3,3,0.1):\n acc=get_acc(scores3*i+scores1*j,correct_answers)\n if best_acc recommendations for the requested users, if none are \n provided, recommendations are generated for all data\n\n - must be a list\n \"\"\"\n\n assert isinstance(users, list)\n\n if len(users) > 0:\n df = df.filter(col(\"userId\").isin(users))\n\n return df.group_by(\"userId\").sort(col(\"prediction\")).limit(n)\n", "repo_name": "renatomatz/ALS_tools", "sub_path": "model.py", "file_name": "model.py", "file_ext": "py", "file_size_in_byte": 914, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "73", "api": [{"api_name": "pyspark.sql.functions.col", "line_number": 27, "usage_type": "call"}, {"api_name": "pyspark.sql.functions.col", "line_number": 29, "usage_type": "call"}]}
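As written, the `group_by(...).sort(...).limit(n)` ending in the record above would not yield per-user top-N with the usual PySpark DataFrame API (DataFrames expose `groupBy`, and `limit` truncates the whole frame rather than each group). The conventional way to rank ALS predictions within each user is a window function; a sketch under the assumption that `df` carries `userId` and `prediction` columns:

```python
from pyspark.sql import functions as F
from pyspark.sql.window import Window

def top_n_per_user(df, n=10, users=None):
    """Keep the n highest-prediction rows for each user (optionally only the requested users)."""
    if users:
        df = df.filter(F.col("userId").isin(users))
    w = Window.partitionBy("userId").orderBy(F.col("prediction").desc())
    return (df.withColumn("rank", F.row_number().over(w))
              .filter(F.col("rank") <= n)
              .drop("rank"))
```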
+{"seq_id": "13959477545", "text": "#!/usr/bin/env python\n# coding: utf-8\n\n# ### MultiVariate Linear Regression with L2 Regularization with K-fold crossValidation\n\n# In[79]:\n\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom sklearn.model_selection import KFold \nfrom sklearn.model_selection import LeaveOneOut\n\n\n# In[80]:\n\n\ndf=pd.read_csv(\"AdmissionDataset/data.csv\")\ndf.head()\n\n\n# **Drop the Serial No because it is irrelevant in prediction**\n\n# In[81]:\n\n\ndf.drop('Serial No.',axis=1,inplace=True)\n\n\n# In[82]:\n\n\ndf.head()\n\n\n# In[90]:\n\n\ndata=np.matrix(df)\nprint(data.shape)\n\n\n# ### Feature Normalisation\n# ${x_i}$= $\\frac{x_i - \\mu}{\\sigma}$ \n# \n# Feature Normalisation is done because data in some columns is very small in comparison to other columns data.\n\n# #### Preparing the Training data\n\n# In[91]:\n\n\ndef get_data(training_data,testing_data):\n training_data=pd.DataFrame(training_data)\n testing_data=pd.DataFrame(testing_data)\n \n columns=training_data.shape[1]\n\n X=training_data.iloc[:,0:columns-1]# features Sets\n\n mu = X.mean()\n sigma = X.std()\n\n X=(X-X.mean())/X.std()\n\n Y=training_data.iloc[:,columns-1:columns] # outputSet\n X.insert(0, 'Ones', 1)\n\n \n X_train = np.matrix(X.values)\n Y_train = np.matrix(Y.values)\n\n \n columns=testing_data.shape[1]\n\n X=testing_data.iloc[:,0:columns-1]# features Sets\n\n Y=testing_data.iloc[:,columns-1:columns] # outputSet\n\n \n X=(X-mu)/sigma\n\n\n X.insert(0, 'Ones', 1)\n\n X_test = np.matrix(X.values)\n Y_test = np.matrix(Y.values)\n\n return X_train,Y_train,X_test,Y_test\n\n\n# #### Mean Square Error with L1 Regularization\n# ${J(\\theta)}$=${\\frac{1}{2m}}{\\sum_{i=0}^{m}}$(${\\hat{y_i}-{y_i})^2 + \\frac{\\lambda}{m}*\\parallel \\theta \\parallel}$ \n# \n# \n# J: is cost function\n# \n# \n# m : no. 
of training examples\n# \n# ${\\theta}$: parameters\n\n# In[92]:\n\n\ndef costCompute_L2(X,Y,theta,lambd):\n j=0.0\n m=X.shape[0]\n \n err = np.power((np.dot(X,theta.T)-Y),2)\n j=np.sum(err)\n reg= (lambd/(2*m))*np.sum(np.square(theta[:,1:])) # L2 penalty; the bias term theta_0 is not penalised, matching the update rule below\n \n return j/(2*m)+reg\n\n\n# #### Gradient Descent algo\n# repeat until convergence {\n# \n# \n# $\\\\theta_j := \\\\theta_j - \\\\alpha \\\\frac{\\\\partial}{\\\\partial \\\\theta_j} J(\\\\theta)$\n# \n# \n# }\n# \n# ${\\\\alpha}$: Learning rate constant\n\n# In[93]:\n\n\n#Vectorised Implementation\ndef gradientDescent(X, y, theta, alpha, iters,lambd):\n \n Jhistory=np.zeros(iters)\n temp=0.0\n \n m=X.shape[0]\n for i in range(iters):\n \n pre = np.dot(X,theta.T)-y\n \n temp=theta[0,0]-(alpha/m)*np.sum(pre)\n \n delta=np.dot(np.transpose(pre),X)\n theta=theta*(1-(alpha*lambd)/m)-(alpha/m)*delta\n \n \n theta[0,0]=temp\n Jhistory[i] = costCompute_L2(X, y, theta,lambd)\n \n \n \n\n return theta,Jhistory\n\n\n# In[94]:\n\n\ndef prediction_Error(X,Y,finalParameter):\n out= np.dot(X,finalParameter.T)\n \n \n err= np.sum(np.square(out-Y))/X.shape[0]\n \n return err\n\n\n# \n\n# In[96]:\n\n\nalpha=.009\niters=1000\nlambd=.01\nkf = KFold(n_splits=3)\n\nerr_Kfold=[]\n\nfor train_index, test_index in kf.split(data):\n training_data,testing_data= data[train_index], data[test_index]\n X_train,Y_train,X_test,Y_test=get_data(training_data,testing_data)\n\n theta = np.matrix(np.random.randn(1,X_train.shape[1]))*0.01\n #print(theta.shape)\n\n minTheta, cost= gradientDescent(X_train, Y_train, theta, alpha, iters,lambd)\n error=prediction_Error(X_train,Y_train,minTheta)\n err_Kfold.append(error)\n \nprint(err_Kfold)\navgErr=0.0 \nfor i in err_Kfold:\n avgErr+=i\n \nprint(avgErr/len(err_Kfold))\n\n\n# In[ ]:\n\n\n\n\n\n# In[124]:\n\n\nchoice_kfold=[2,3,4,5,6,7,8,9,10]\n\nerr_train=[]\nerr_test=[]\nalpha=.009\niters=1000\nfor l in choice_kfold:\n err_Kfold_train=[]\n err_Kfold_test=[]\n kf = KFold(n_splits=l)\n \n for train_index, test_index in kf.split(data):\n \n training_data,testing_data= data[train_index], data[test_index]\n X_train,Y_train,X_test,Y_test=get_data(training_data,testing_data)\n #print(X_train.shape)\n \n theta = np.matrix(np.random.randn(1,X_train.shape[1]))\n #print(theta.shape)\n\n minTheta, cost= gradientDescent(X_train, Y_train, theta, alpha, iters,lambd)\n error1=prediction_Error(X_train,Y_train,minTheta)\n error2=prediction_Error(X_test,Y_test,minTheta)\n \n err_Kfold_train.append(error1)\n err_Kfold_test.append(error2)\n \n avgErr_train=0.0 \n for i in err_Kfold_train:\n avgErr_train+=i\n \n avgErr_train=avgErr_train/len(err_Kfold_train)\n err_train.append(avgErr_train)\n \n avgErr_test=0.0 \n for i in err_Kfold_test:\n avgErr_test+=i\n \n avgErr_test=avgErr_test/len(err_Kfold_test)\n err_test.append(avgErr_test) \n \n\n\n# In[125]:\n\n\nplt.rcParams['figure.figsize'] = [12, 6]\nplt.scatter(choice_kfold,err_train,label=\"Training Error\")\nplt.plot(choice_kfold,err_train,'g')\nplt.scatter(choice_kfold,err_test,label=\"Validation Error\")\nplt.plot(choice_kfold,err_test,'r')\n\nplt.legend()\n\nplt.xlabel('Number of Folds (k)')\nplt.ylabel('Error')\nplt.title('Training and Validation Error vs Number of Folds')\n\n\n# In[126]:\n\n\nprint(err_train)\nprint(err_test)\n\n\n# ### Leave One Out Cross Validation (LOOCV)\n# \n# **It is a special case of K-Fold cross-validation**\n# > **K = m**
\n# > m: number of training examples\n\n# In[129]:\n\n\nloo = LeaveOneOut()\n\nalpha=.009\niters=1000\n\n\nerr_loo_train=[]\nerr_loo_test=[]\n\nfor train_index, test_index in loo.split(data):\n training_data,testing_data= data[train_index], data[test_index]\n X_train,Y_train,X_test,Y_test=get_data(training_data,testing_data)\n \n #print(X_train.shape)\n \n theta = np.matrix(np.random.randn(1,X_train.shape[1]))\n #print(theta.shape)\n \n minTheta, cost= gradientDescent(X_train, Y_train, theta, alpha, iters,lambd)\n error1=prediction_Error(X_train,Y_train,minTheta)\n error2=prediction_Error(X_test,Y_test,minTheta)\n\n err_loo_train.append(error1)\n err_loo_test.append(error2)\n \navgErr_train=0.0 \nfor i in err_loo_train:\n avgErr_train+=i\n\navgErr_train=avgErr_train/len(err_loo_train)\nprint(avgErr_train)\n\navgErr_test=0.0 \nfor i in err_loo_test:\n avgErr_test+=i\n\navgErr_test=avgErr_test/len(err_loo_test)\nprint(avgErr_test) \n \n\n\n# - It is highly computationally expensive\n# - There is no randomness in the splitting procedure, so it always yields the same result and is therefore stable\n\n# In[ ]:\n\n\n\n\n", "repo_name": "ishan16696/Machine_Learning", "sub_path": "assignment7/src/q-1-5.py", "file_name": "q-1-5.py", "file_ext": "py", "file_size_in_byte": 6422, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "85", "api": [{"api_name": "pandas.read_csv", "line_number": 19, "usage_type": "call"}, {"api_name": "numpy.matrix", "line_number": 40, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 55, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 56, "usage_type": "call"}, {"api_name": "numpy.matrix", "line_number": 71, "usage_type": "call"}, {"api_name": "numpy.matrix", "line_number": 72, "usage_type": "call"}, {"api_name": "numpy.matrix", "line_number": 87, "usage_type": "call"}, {"api_name": "numpy.matrix", "line_number": 88, "usage_type": "call"}, {"api_name": "numpy.power", "line_number": 111, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 111, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 112, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 113, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 135, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 141, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 143, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 145, "usage_type": "call"}, {"api_name": "numpy.transpose", "line_number": 145, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 162, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 165, "usage_type": "call"}, {"api_name": "numpy.square", "line_number": 165, "usage_type": "call"}, {"api_name": "sklearn.model_selection.KFold", "line_number": 178, "usage_type": "call"}, {"api_name": "numpy.matrix", "line_number": 186, "usage_type": "call"}, {"api_name": "numpy.random.randn", "line_number": 186, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 186, "usage_type": "attribute"}, {"api_name": "sklearn.model_selection.KFold", "line_number": 219, "usage_type": "call"}, {"api_name": "numpy.matrix", "line_number": 227, "usage_type": "call"}, {"api_name": "numpy.random.randn", "line_number": 227, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 227, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 256, "usage_type": "attribute"}, {"api_name": 
"matplotlib.pyplot", "line_number": 256, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 257, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 257, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 258, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 258, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 259, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 259, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 260, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 260, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 262, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 262, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 264, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 264, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 265, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 265, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 266, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 266, "usage_type": "name"}, {"api_name": "sklearn.model_selection.LeaveOneOut", "line_number": 285, "usage_type": "call"}, {"api_name": "numpy.matrix", "line_number": 300, "usage_type": "call"}, {"api_name": "numpy.random.randn", "line_number": 300, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 300, "usage_type": "attribute"}]}
+{"seq_id": "18916657665", "text": "import cv2\r\nimport numpy as np\r\ncap=cv2.VideoCapture(0)\r\nwhile(cap.isOpened()):\r\n ret,frame=cap.read()\r\n frame=cv2.flip(frame,1)\r\n roi=frame[100:900, 100:900]\r\n cv2.rectangle(frame,(100,100),(300,300),(0,255,0),0)\r\n hsv = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)\r\n cv2.imshow('frame',hsv)\r\n cap.get(3)\r\n cap.get(4)\r\n lower_lim = np.array([0,20,70], dtype=np.uint8)\r\n upper_lim = np.array([20,255,255], dtype=np.uint8)\r\n mask = cv2.inRange(hsv, lower_lim, upper_lim)\r\n mask = cv2.dilate(mask,kernel,iterations = 4)\r\n mask = cv2.GaussianBlur(mask,(5,5),100)\r\n if cv2.waitKey(1000) & 0xFF == ord('q'):\r\n break\r\n #cv2.imshow('frame',frame)\r\ncap.release()\r\ncv2.destroyAllWindows()\r\n", "repo_name": "Shashank-Shukla/Devsoc19", "sub_path": "newtry.py", "file_name": "newtry.py", "file_ext": "py", "file_size_in_byte": 725, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "73", "api": [{"api_name": "cv2.VideoCapture", "line_number": 3, "usage_type": "call"}, {"api_name": "cv2.flip", "line_number": 6, "usage_type": "call"}, {"api_name": "cv2.rectangle", "line_number": 8, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 9, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2HSV", "line_number": 9, "usage_type": "attribute"}, {"api_name": "cv2.imshow", "line_number": 10, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 13, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 13, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 14, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 14, "usage_type": "attribute"}, {"api_name": "cv2.inRange", "line_number": 15, "usage_type": "call"}, {"api_name": "cv2.dilate", "line_number": 16, "usage_type": "call"}, {"api_name": "cv2.GaussianBlur", "line_number": 17, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 18, "usage_type": "call"}, {"api_name": "cv2.destroyAllWindows", "line_number": 22, "usage_type": "call"}]}
+{"seq_id": "21061773805", "text": "from scipy.spatial import distance as dist\nimport numpy as np\n\n\nclass Mouth:\n def __init__(self):\n self.mar = None\n self.status = None\n\n\ndef mouth_aspect_ratio(landmarks):\n A = dist.euclidean(landmarks[37, :], landmarks[84, :]) # 51, 59 # media(37,84)\n B = dist.euclidean(landmarks[267, :], landmarks[314, :]) # 53, 57 # media(267,314)\n C = dist.euclidean(landmarks[61, :], landmarks[291, :]) # 49, 55 #media(61,291)\n mar = (A + B) / (2.0 * C)\n return mar\n\n\nMOUTH_AR_THRESH = 0.6\n\n\n# (mStart, mEnd) = (49, 68)\n\ndef mouth_open(frame, faces):\n landmarks = np.array(faces[0].landmarks)[:, :2]\n outer_bottom = [61, 146, 91, 181, 84, 17, 314, 405, 321, 375, 291]\n outer_top = [61, 185, 40, 39, 37, 0, 267, 269, 270, 409, 291]\n inner_bottom = [78, 95, 88, 178, 87, 14, 317, 402, 318, 324, 308]\n inner_top = [78, 191, 80, 81, 82, 13, 312, 311, 310, 415, 308]\n\n mouth_obj = Mouth()\n mouth = landmarks[outer_bottom.extend(inner_top), :]\n mouthMAR = mouth_aspect_ratio(landmarks)\n mar = mouthMAR\n mouth_obj.mar = mar\n\n if mar >= MOUTH_AR_THRESH:\n mouth_obj.status = \"mouth open\"\n elif mar < MOUTH_AR_THRESH:\n mouth_obj.status = \"mouth close\"\n\n return mouth_obj\n", "repo_name": "shreyaspj20/Reliable-proctoring-AI", "sub_path": "audio_and_oral_movements/oral_movement.py", "file_name": "oral_movement.py", "file_ext": "py", "file_size_in_byte": 1239, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 11, "dataset": "github-code", "pt": "73", "api": [{"api_name": "scipy.spatial.distance.euclidean", "line_number": 12, "usage_type": "call"}, {"api_name": "scipy.spatial.distance", "line_number": 12, "usage_type": "name"}, {"api_name": "scipy.spatial.distance.euclidean", "line_number": 13, "usage_type": "call"}, {"api_name": "scipy.spatial.distance", "line_number": 13, "usage_type": "name"}, {"api_name": "scipy.spatial.distance.euclidean", "line_number": 14, "usage_type": "call"}, {"api_name": "scipy.spatial.distance", "line_number": 14, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 25, "usage_type": "call"}]}
+{"seq_id": "10815832880", "text": "import requests\nimport json\nimport pandas as pd\nfrom datetime import datetime\n\nastroport_factory_address = \"terra1fnywlw4edny3vw44x04xd67uzkdqluymgreu7g\"\nterraswap_factory_address = \"terra1ulgw0td86nvs4wtpsc80thv6xelk76ut7a7apj\"\n\nassets_file = open(\"assets.json\", \"r\")\nassets = json.load(assets_file)\n\n\ndef get_pair_liquidity(factory_address, asset_address):\n pair_liquidity = 0\n try:\n pair = requests.get(\n f\"https://lcd.terra.dev/wasm/contracts/{factory_address}/store?query_msg={{%22pair%22:{{%22asset_infos%22:[{{%22token%22:{{%22contract_addr%22:%22{asset_address}%22}}}},{{%22native_token%22:{{%22denom%22:%22uusd%22}}}}]}}}}\"\n ).json()\n pair_address = pair[\"result\"][\"contract_addr\"]\n pool = requests.get(\n f\"https://lcd.terra.dev/wasm/contracts/{pair_address}/store?query_msg=%7B%22pool%22:%7B%7D%7D\"\n ).json()\n pool_assets = pool[\"result\"][\"assets\"]\n native_amount = 0\n token_amount = 0\n for pool_asset in pool_assets:\n if \"token\" in pool_asset[\"info\"]:\n token_amount = int(pool_asset[\"amount\"])\n elif \"native_token\" in pool_asset[\"info\"]:\n assert pool_asset[\"info\"][\"native_token\"][\"denom\"] == \"uusd\"\n native_amount = int(pool_asset[\"amount\"])\n pair_liquidity = (native_amount * 2) / 1e6\n except:\n pass\n return pair_liquidity\n\n\ndef main():\n asset_symbols = assets.keys()\n asset_addresses = assets.values()\n asset_terraswap_liquidity = []\n asset_astroport_liquidity = []\n for (asset_symbol, asset_address) in assets.items():\n pair_liquidity = get_pair_liquidity(terraswap_factory_address, asset_address)\n asset_terraswap_liquidity.append(pair_liquidity)\n pair_liquidity = get_pair_liquidity(astroport_factory_address, asset_address)\n asset_astroport_liquidity.append(pair_liquidity)\n\n asset_data_sources = []\n df = pd.DataFrame(\n {\n \"asset_symbols\": asset_symbols,\n \"asset_address\": asset_addresses,\n \"asset_terraswap_liquidity\": asset_terraswap_liquidity,\n \"asset_astroport_liquidity\": asset_astroport_liquidity,\n }\n )\n current_date = datetime.now()\n formatted_date = current_date.strftime(\"%d_%m_%y\")\n df.to_csv(f\"asset_liquidity_info/asset_liquidity_info_{formatted_date}.csv\", index=False)\n\n\nif __name__ == \"__main__\":\n main()\n", "repo_name": "tansawit/terra-assets-liquidity-data", "sub_path": "asset_liquidity_data.py", "file_name": "asset_liquidity_data.py", "file_ext": "py", "file_size_in_byte": 2440, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "73", "api": [{"api_name": "json.load", "line_number": 10, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 16, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 20, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 50, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 58, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 58, "usage_type": "name"}]}
+{"seq_id": "22917846904", "text": "import re\nimport logging\nimport time\nfrom collections import namedtuple\n\nfrom . import utils\nimport requests\nfrom selenium import webdriver\nfrom selenium.common.exceptions import ElementNotInteractableException\n\n\n# RSA 公钥\nSHU_RSA_PUBKEY = \"\"\"\n-----BEGIN PUBLIC KEY-----\nMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDl/aCgRl9f/4ON9MewoVnV58OL\nOU2ALBi2FKc5yIsfSpivKxe7A6FitJjHva3WpM7gvVOinMehp6if2UNIkbaN+plW\nf5IwqEVxsNZpeixc4GsbY9dXEk3WtRjwGSyDLySzEESH/kpJVoxO7ijRYqU+2oSR\nwTBNePOk1H+LRQokgQIDAQAB\n-----END PUBLIC KEY-----\"\"\"\n\n# group: 1 - description, 2 - url location\nHISTORY_RE = re.compile(r'\\[\"\",\"(.*?)\",\\d+,\"\",\"(.*?)\",\"\",(?:true|false)]')\nRecord = namedtuple(\"Record\", [\"complete\", \"desc\", \"location\"])\nQuestion = namedtuple(\"Question\", [\"desc\", \"xpath\", \"type_\"])\nFORM = [\n Question(\n desc=\"我承诺,以下报送内容真实有效并可用于学校管理需要!\",\n xpath=\"//div[@id='p1_ChengNuo']/div[@class='f-field-body-cell']//i\",\n type_=\"checkbox\"\n ),\n Question(\n desc=\"当前身体状况\",\n xpath=\"//div[@id='p1_DangQSTZK']//td[1]/div/div[@class='f-field-body-cell']//i\",\n type_=\"radio\"\n ),\n Question(\n desc=\"当天是否在上海\",\n xpath=\"//div[@id='p1_ShiFSH']//td[1]/div/div[@class='f-field-body-cell']//i\",\n type_=\"radio\"\n ),\n Question(\n desc=\"当天是否住学校\",\n xpath=\"//div[@id='p1_ShiFZX']//td[1]/div/div[@class='f-field-body-cell']//i\",\n type_=\"radio\"\n ),\n Question(\n desc=\"是否家庭地址\",\n xpath=\"//div[@id='p1_ShiFZJ']//td[2]/div/div[@class='f-field-body-cell']//i\",\n type_=\"radio\"\n )\n]\n\nSAFETY_CHOICE_XPATH = \"//div[@id='p1_pnlDangSZS_DangSZS']//div[@id='fineui_%d']\"\n\nheaders = {\n \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.101 Safari/537.36\",\n \"Accept-Language\": \"zh-CN,zh;q=0.9\"\n}\nlogger = logging.getLogger(__name__)\n\n\nclass User:\n def __init__(self, username, passwd, chrome_driver):\n self.username = username\n self.passwd = passwd\n self.chrome_driver= chrome_driver\n self.session = requests.Session()\n self.session.headers.update(headers)\n # proxies = {\n # \"http\": \"http://127.0.0.1:7890\",\n # \"https\": \"http://127.0.0.1:7890\"\n # }\n # self.session.proxies.update(proxies)\n # self.session.verify = False\n\n def set_cookie(self, oauth_session, asp_session_id, ncov2019selfreport):\n self.session.cookies.set(\"SHU_OAUTH2_SESSION\", oauth_session, domain=\"newsso.shu.edu.cn\")\n self.session.cookies.set(\"ASP.NET_SessionId\", asp_session_id, domain=\"selfreport.shu.edu.cn\")\n self.session.cookies.set(\".ncov2019selfreport\", ncov2019selfreport, domain=\"selfreport.shu.edu.cn\")\n\n def login(self):\n session = self.session\n r = session.get(\"https://selfreport.shu.edu.cn/Default.aspx\")\n url = r.url\n\n # encrypt password\n password = utils.rsa_encrypt(SHU_RSA_PUBKEY, self.passwd.encode())\n data = dict(username=self.username, password=password)\n\n r = session.post(url, data, headers={\"Referer\": url})\n if r.status_code == 200 and \"学工号:\" in r.text:\n logger.debug(\"Login success.\")\n return True\n else:\n logger.error(\"Login failed. 
status_code%s, url:%s\\ntext:%s\", r.status_code, r.url, r.text)\n return False\n\n def fetch_history(self):\n \"\"\"\n Fetch the history of daily-report submissions.\n\n :return: [Record(False, \"2021-06-17(未填报,请点击此处补报)\", \"/DayReport.aspx?day=2021-06-17\"), ...]\n \"\"\"\n r = self.session.get(\"https://selfreport.shu.edu.cn/ReportHistory.aspx\")\n if not r.url.startswith(\"https://selfreport.shu.edu.cn/\"):\n raise RuntimeError(\"invalid cookie\")\n js_object = utils.substring(r.text, \"f2_state=\", \";\")\n history = HISTORY_RE.findall(js_object)\n ret = []\n for desc, url in history:\n if \"未填报\" in desc:\n ret.append(Record(complete=False, desc=desc, location=url))\n else:\n ret.append(Record(complete=True, desc=desc, location=url))\n return ret\n\n def finish_today(self):\n \"\"\"\n Use selenium to complete today's daily report (每日一报)\n :return: True on success, False on failure\n \"\"\"\n options = webdriver.ChromeOptions()\n options.add_argument(\"--no-sandbox\")\n options.add_argument(\"--headless\")\n if self.chrome_driver.startswith(\"http\"):\n chrome = webdriver.Remote\n else:\n chrome = webdriver.Chrome\n with chrome(self.chrome_driver, desired_capabilities=options.to_capabilities()) \\\n as driver:\n # set the selfreport.shu.edu.cn cookies\n driver.get(\"https://selfreport.shu.edu.cn/res/css/slick.css\")\n # cookies_name = [\"ASP.NET_SessionId\", \".ncov2019selfreport\"]\n cookies_name = [\".ncov2019selfreport\"]\n cookiejar = self.session.cookies\n for name in cookies_name:\n driver.add_cookie(dict(name=name, value=cookiejar.get(name, domain=\"selfreport.shu.edu.cn\")))\n\n # set the newsso.shu.edu.cn cookie\n driver.get(\"https://newsso.shu.edu.cn/static/css/alert-a1b99b3681.css\")\n driver.add_cookie(dict(\n name=\"SHU_OAUTH2_SESSION\",\n value=cookiejar.get(\"SHU_OAUTH2_SESSION\", domain=\"newsso.shu.edu.cn\")\n ))\n # open the daily-report page\n # TODO handle the \"unfinished history report\" popup\n\n # check whether the page is still loading\n def is_loading():\n js = \"\"\"return (function(){\n let loading = $(\"#f_ajax_loading\"); \n if(loading.length > 0) {return loading.is(\":visible\");} \n return false; \n })(); \n \"\"\"\n return driver.execute_script(js)\n\n # wait until loading finishes\n def waiting_loading():\n time.sleep(1)\n while is_loading():\n print(\"Waiting for loading...\")\n time.sleep(1)\n\n # get the reference answer for the safety-knowledge question\n def get_answer():\n \"\"\"\n :return: (0, 1) # 0 means A, 1 means B...\n \"\"\"\n element = driver.find_element_by_xpath(\"//div[@id='p1_pnlDangSZS_ckda-inputEl']/a\")\n text = element.get_attribute(\"onclick\") # javascript:alert('参考答案:A');\n start = text.index(\":\") + 1\n end = text.index(\"'\", start)\n answer = text[start:end]\n answer = map(lambda x: ord(x) - ord('A'), list(answer))\n return answer\n\n driver.get(\"https://selfreport.shu.edu.cn/DayReport.aspx\")\n\n if not driver.current_url.startswith(\"https://selfreport.shu.edu.cn/DayReport.aspx\"):\n logger.info(\"invalid cookies, page redirect to: %s\", driver.current_url)\n return False\n\n for ques in FORM:\n waiting_loading()\n element = driver.find_element_by_xpath(ques.xpath)\n try:\n element.click()\n except ElementNotInteractableException:\n logger.error(\"ElementNotInteractableException: %s\", ques.desc)\n\n # answer the fire-safety question\n for ans in get_answer():\n element = driver.find_element_by_xpath(SAFETY_CHOICE_XPATH % ans)\n try:\n element.click()\n except ElementNotInteractableException:\n logger.error(\"ElementNotInteractableException: failed to click safety question choice\")\n\n # validate the form; note this does not check whether the safety question has been answered\n is_ok = driver.execute_script(\"return F.validateForm('p1', '_self', true, false);\")\n if not is_ok:\n logger.warning(\"form incomplete\")\n return False\n else:\n 
logger.info(\"form validation passed\")\n\n # submit the form\n driver.execute_script(\"__doPostBack('p1$ctl02$btnSubmit', '');\")\n waiting_loading()\n if \"日报信息提交成功\" in driver.page_source:\n logger.info(\"submission succeeded\")\n return True\n else:\n logger.warning(\"submission failed\")\n logger.warning(\"Current url: %s, Page Source:\\n%s\", driver.current_url, driver.page_source)\n return False\n", "repo_name": "hwenwur/SHU-Selfreport", "sub_path": "selfreport/api.py", "file_name": "api.py", "file_ext": "py", "file_size_in_byte": 8674, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "73", "api": [{"api_name": "re.compile", "line_number": 22, "usage_type": "call"}, {"api_name": "collections.namedtuple", "line_number": 23, "usage_type": "call"}, {"api_name": "collections.namedtuple", "line_number": 24, "usage_type": "call"}, {"api_name": "logging.getLogger", "line_number": 59, "usage_type": "call"}, {"api_name": "requests.Session", "line_number": 67, "usage_type": "call"}, {"api_name": "selenium.webdriver.ChromeOptions", "line_number": 122, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 122, "usage_type": "name"}, {"api_name": "selenium.webdriver.Remote", "line_number": 126, "usage_type": "attribute"}, {"api_name": "selenium.webdriver", "line_number": 126, "usage_type": "name"}, {"api_name": "selenium.webdriver.Chrome", "line_number": 128, "usage_type": "attribute"}, {"api_name": "selenium.webdriver", "line_number": 128, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 160, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 163, "usage_type": "call"}, {"api_name": "selenium.common.exceptions.ElementNotInteractableException", "line_number": 189, "usage_type": "name"}, {"api_name": "selenium.common.exceptions.ElementNotInteractableException", "line_number": 197, "usage_type": "name"}]}
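The record above imports `utils.rsa_encrypt` but never shows it. Given the PEM public key and the fact that the SSO form expects the encrypted password as a string, a plausible implementation is PKCS#1 v1.5 encryption followed by base64; this is an assumption about the helper's contract, sketched with the `cryptography` package:

```python
import base64
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric import padding

def rsa_encrypt(pem_pubkey: str, message: bytes) -> str:
    """Encrypt message with the PEM public key and return base64 text (assumed contract)."""
    pubkey = serialization.load_pem_public_key(pem_pubkey.encode())
    ciphertext = pubkey.encrypt(message, padding.PKCS1v15())
    return base64.b64encode(ciphertext).decode()
```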
+{"seq_id": "8960747090", "text": "import discord\nfrom discord.ext import commands\nimport random\nfrom insults import get_long_insult\nfrom annoy_brad_logic import annoy_brad\nimport os\nimport sys\n\nSCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))\nsys.path.append(os.path.dirname(SCRIPT_DIR))\n\nfrom insults import get_long_insult, get_short_insult\n\nclass roll_dice(commands.Cog): # create a class for our cog that inherits from commands.Cog\n # this class is used to create a cog, which is a module that can be added to the bot\n\n def __init__(self, bot): # this is a special method that is called when the cog is loaded\n self.bot = bot\n\n @commands.slash_command(description=\"roll some dice\", name= \"roll\")\n # pycord will figure out the types for you\n async def roll_dice(\n self,\n ctx, \n number_of_dice: discord.Option(int, max_value= 99,\n description=\"the number of dice you want to roll\"), \n dice_size: discord.Option(int,description=\"the number of sides to the dice\"), \n is_distinct: discord.Option(bool, description=\"Do you want this set of dice to be distinct?\", default=False, required=False)\n ):\n\n if (number_of_dice > dice_size) & is_distinct:\n await ctx.respond(f\"OI! You {get_long_insult()}, You cant choose more dice then possible distinct options\")\n return\n rolls = get_dice_rolls(number_of_dice, dice_size, is_distinct)\n \n rolls.sort()\n\n embed = discord.Embed(\n title=\"YOUR FUCKING DICE\",\n description=f\"Here you go you **{get_long_insult().upper()}**\",\n color=discord.Colour.dark_gold(), # Pycord provides a class with default colors you can choose from\n )\n embed.add_field(name=\"ROLLS:\", value=f\"`{rolls}`\", inline=False)\n embed.set_thumbnail(url=\"https://scontent.xx.fbcdn.net/v/t1.15752-9/278403172_399692048829552_6640220989778099445_n.jpg?stp=dst-jpg_s403x403&_nc_cat=101&ccb=1-5&_nc_sid=aee45a&_nc_ohc=fp1v8cyJAJwAX8OItsD&_nc_ad=z-m&_nc_cid=0&_nc_ht=scontent.xx&oh=03_AVJGPV02ajRAuuVrZxJxjwaIpQNKrbd1MTu_QNLywsnqsw&oe=6289995B\")\n embed.set_author(name=\"Hive Helper Regina\", icon_url=\"https://scontent.xx.fbcdn.net/v/t1.15752-9/278403172_399692048829552_6640220989778099445_n.jpg?stp=dst-jpg_s403x403&_nc_cat=101&ccb=1-5&_nc_sid=aee45a&_nc_ohc=fp1v8cyJAJwAX8OItsD&_nc_ad=z-m&_nc_cid=0&_nc_ht=scontent.xx&oh=03_AVJGPV02ajRAuuVrZxJxjwaIpQNKrbd1MTu_QNLywsnqsw&oe=6289995B\")\n brad = await annoy_brad(ctx)\n if not brad:\n await ctx.respond(embed=embed)\n \ndef get_dice_rolls(number_of_dice, dice_size, is_distinct):\n roll_number = 0\n rolls = []\n while roll_number < number_of_dice:\n roll = random.randint(1, dice_size)\n if is_distinct:\n if roll in rolls:\n continue\n else:\n rolls.append(roll)\n roll_number += 1\n continue\n rolls.append(roll)\n roll_number+=1\n return rolls\n\ndef setup(bot): # this is called by Pycord to setup the cog\n bot.add_cog(roll_dice(bot)) # add the cog to the bot\n\n", "repo_name": "jackWilliamFreeman/hive-helper-regina", "sub_path": "cogs/roll_dice.py", "file_name": "roll_dice.py", "file_ext": "py", "file_size_in_byte": 3075, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "73", "api": [{"api_name": "os.path.dirname", "line_number": 9, "usage_type": "call"}, {"api_name": "os.path", "line_number": 9, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 9, "usage_type": "call"}, {"api_name": "sys.path.append", "line_number": 10, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 10, "usage_type": "attribute"}, {"api_name": 
"os.path.dirname", "line_number": 10, "usage_type": "call"}, {"api_name": "os.path", "line_number": 10, "usage_type": "attribute"}, {"api_name": "discord.ext.commands.Cog", "line_number": 14, "usage_type": "attribute"}, {"api_name": "discord.ext.commands", "line_number": 14, "usage_type": "name"}, {"api_name": "discord.Option", "line_number": 25, "usage_type": "call"}, {"api_name": "discord.Option", "line_number": 27, "usage_type": "call"}, {"api_name": "discord.Option", "line_number": 28, "usage_type": "call"}, {"api_name": "insults.get_long_insult", "line_number": 32, "usage_type": "call"}, {"api_name": "discord.Embed", "line_number": 38, "usage_type": "call"}, {"api_name": "insults.get_long_insult", "line_number": 40, "usage_type": "call"}, {"api_name": "discord.Colour.dark_gold", "line_number": 41, "usage_type": "call"}, {"api_name": "discord.Colour", "line_number": 41, "usage_type": "attribute"}, {"api_name": "annoy_brad_logic.annoy_brad", "line_number": 46, "usage_type": "call"}, {"api_name": "discord.ext.commands.slash_command", "line_number": 20, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 20, "usage_type": "name"}, {"api_name": "random.randint", "line_number": 54, "usage_type": "call"}]}
+{"seq_id": "23545943434", "text": "import os\nimport comtypes.client\nfrom pathlib import Path\nimport tkinter as tk\nfrom tkinter import filedialog, messagebox\n\ndef ppt_to_pdf(input_file, output_file):\n powerpoint = comtypes.client.CreateObject(\"Powerpoint.Application\")\n powerpoint.Visible = 1\n\n presentation = powerpoint.Presentations.Open(str(input_file))\n presentation.ExportAsFixedFormat(str(output_file), 2) # 2 for PDF format\n presentation.Close()\n\n powerpoint.Quit()\n\ndef excel_to_pdf(input_file, output_file):\n excel = comtypes.client.CreateObject(\"Excel.Application\")\n excel.Visible = 0\n\n workbook = excel.Workbooks.Open(str(input_file))\n workbook.ExportAsFixedFormat(0, str(output_file)) # 0 for PDF format\n workbook.Close()\n\n excel.Quit()\n\ndef word_to_pdf(input_file, output_file):\n word = comtypes.client.CreateObject(\"Word.Application\")\n word.Visible = 0\n\n doc = word.Documents.Open(str(input_file))\n doc.SaveAs(str(output_file), FileFormat=17) # 17 for PDF format\n doc.Close()\n\n word.Quit()\n\ndef convert_to_pdf(input_path, output_path):\n input_file = Path(input_path)\n output_file = Path(output_path)\n\n if input_file.suffix in ['.ppt', '.pptx']:\n ppt_to_pdf(input_file, output_file)\n elif input_file.suffix in ['.xls', '.xlsx']:\n excel_to_pdf(input_file, output_file)\n elif input_file.suffix in ['.doc', '.docx']:\n word_to_pdf(input_file, output_file)\n else:\n messagebox.showerror(\"Error\", \"Unsupported file format.\")\n\ndef browse_input_file():\n file_path = filedialog.askopenfilename()\n input_entry.delete(0, tk.END)\n input_entry.insert(tk.END, file_path)\n\ndef browse_output_folder():\n folder_path = filedialog.askdirectory()\n output_entry.delete(0, tk.END)\n output_entry.insert(tk.END, folder_path)\n\ndef convert_file():\n input_file = input_entry.get()\n output_folder = output_entry.get()\n\n if not input_file or not output_folder:\n messagebox.showerror(\"Error\", \"Please select input file and output folder.\")\n return\n\n output_file = os.path.join(output_folder, Path(input_file).stem + \".pdf\")\n \n try:\n convert_to_pdf(input_file, output_file)\n messagebox.showinfo(\"Success\", \"File converted to PDF successfully!\")\n except Exception as e:\n messagebox.showerror(\"Error\", str(e))\n\n# Create the main window\nwindow = tk.Tk()\nwindow.title(\"File Converter\")\n\n# Input File\ninput_label = tk.Label(window, text=\"Input File:\")\ninput_label.grid(row=0, column=0, padx=5, pady=5, sticky=tk.W)\ninput_entry = tk.Entry(window, width=50)\ninput_entry.grid(row=0, column=1, padx=5, pady=5)\ninput_button = tk.Button(window, text=\"Browse\", command=browse_input_file)\ninput_button.grid(row=0, column=2, padx=5, pady=5)\n\n# Output Folder\noutput_label = tk.Label(window, text=\"Output Folder:\")\noutput_label.grid(row=1, column=0, padx=5, pady=5, sticky=tk.W)\noutput_entry = tk.Entry(window, width=50)\noutput_entry.grid(row=1, column=1, padx=5, pady=5)\noutput_button = tk.Button(window, text=\"Browse\", command=browse_output_folder)\noutput_button.grid(row=1, column=2, padx=5, pady=5)\n\n# Convert Button\nconvert_button = tk.Button(window, text=\"Convert\", command=convert_file)\nconvert_button.grid(row=2, column=1, padx=5, pady=10)\n\n# Start the main loop\nwindow.mainloop()\n", "repo_name": "akkhanna10/pdf-converter", "sub_path": "file_converter.py", "file_name": "file_converter.py", "file_ext": "py", "file_size_in_byte": 3295, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "85", 
"api": [{"api_name": "comtypes.client.client.CreateObject", "line_number": 8, "usage_type": "call"}, {"api_name": "comtypes.client.client", "line_number": 8, "usage_type": "attribute"}, {"api_name": "comtypes.client", "line_number": 8, "usage_type": "name"}, {"api_name": "comtypes.client.client.CreateObject", "line_number": 18, "usage_type": "call"}, {"api_name": "comtypes.client.client", "line_number": 18, "usage_type": "attribute"}, {"api_name": "comtypes.client", "line_number": 18, "usage_type": "name"}, {"api_name": "comtypes.client.client.CreateObject", "line_number": 28, "usage_type": "call"}, {"api_name": "comtypes.client.client", "line_number": 28, "usage_type": "attribute"}, {"api_name": "comtypes.client", "line_number": 28, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 38, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 39, "usage_type": "call"}, {"api_name": "tkinter.messagebox.showerror", "line_number": 48, "usage_type": "call"}, {"api_name": "tkinter.messagebox", "line_number": 48, "usage_type": "name"}, {"api_name": "tkinter.filedialog.askopenfilename", "line_number": 51, "usage_type": "call"}, {"api_name": "tkinter.filedialog", "line_number": 51, "usage_type": "name"}, {"api_name": "tkinter.END", "line_number": 52, "usage_type": "attribute"}, {"api_name": "tkinter.END", "line_number": 53, "usage_type": "attribute"}, {"api_name": "tkinter.filedialog.askdirectory", "line_number": 56, "usage_type": "call"}, {"api_name": "tkinter.filedialog", "line_number": 56, "usage_type": "name"}, {"api_name": "tkinter.END", "line_number": 57, "usage_type": "attribute"}, {"api_name": "tkinter.END", "line_number": 58, "usage_type": "attribute"}, {"api_name": "tkinter.messagebox.showerror", "line_number": 65, "usage_type": "call"}, {"api_name": "tkinter.messagebox", "line_number": 65, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 68, "usage_type": "call"}, {"api_name": "os.path", "line_number": 68, "usage_type": "attribute"}, {"api_name": "pathlib.Path", "line_number": 68, "usage_type": "call"}, {"api_name": "tkinter.messagebox.showinfo", "line_number": 72, "usage_type": "call"}, {"api_name": "tkinter.messagebox", "line_number": 72, "usage_type": "name"}, {"api_name": "tkinter.messagebox.showerror", "line_number": 74, "usage_type": "call"}, {"api_name": "tkinter.messagebox", "line_number": 74, "usage_type": "name"}, {"api_name": "tkinter.Tk", "line_number": 77, "usage_type": "call"}, {"api_name": "tkinter.Label", "line_number": 81, "usage_type": "call"}, {"api_name": "tkinter.W", "line_number": 82, "usage_type": "attribute"}, {"api_name": "tkinter.Entry", "line_number": 83, "usage_type": "call"}, {"api_name": "tkinter.Button", "line_number": 85, "usage_type": "call"}, {"api_name": "tkinter.Label", "line_number": 89, "usage_type": "call"}, {"api_name": "tkinter.W", "line_number": 90, "usage_type": "attribute"}, {"api_name": "tkinter.Entry", "line_number": 91, "usage_type": "call"}, {"api_name": "tkinter.Button", "line_number": 93, "usage_type": "call"}, {"api_name": "tkinter.Button", "line_number": 97, "usage_type": "call"}]}
+{"seq_id": "28750646535", "text": "import datetime\nimport logging\n\nfrom odoo import models, fields, api\n\nSELECTION_BAND = [\n    (\"160m\", \"160m\"),\n    (\"80m\", \"80m\"),\n    (\"40m\", \"40m\"),\n    (\"30m\", \"30m\"),\n    (\"20m\", \"20m\"),\n    (\"17m\", \"17m\"),\n    (\"15m\", \"15m\"),\n    (\"12m\", \"12m\"),\n    (\"10m\", \"10m\")\n]\n\nSELECTION_MODE = [\n    (\"CW\", \"CW\"),\n    (\"SSB\", \"SSB\"),\n    (\"DIGI\", \"DIGI\")\n]\n\nSELECTION_DUPE = [\n    (\"normal\", \"Normal\"),\n    (\"dupe\", \"DUPE\")\n]\n\n_logger = logging.getLogger(__name__)\n\n\nclass QSO(models.Model):\n    _name = \"award_naval.qso\"\n    _order = \"ts ASC\"\n\n    callsign = fields.Char(\n        string=\"Callsign\",\n        required=True\n    )\n\n    ts = fields.Datetime(\n        string=\"Date & Time\",\n        required=True\n    )\n\n    band = fields.Selection(\n        string=\"Band\",\n        selection=SELECTION_BAND,\n        required=True\n    )\n\n    mode = fields.Selection(\n        string=\"Mode\",\n        selection=SELECTION_MODE,\n        required=True\n    )\n\n    reference = fields.Char(\n        string=\"Reference\"\n    )\n\n    operator = fields.Char(\n        string=\"Operator\",\n        required=True\n    )\n\n    rawdata = fields.Char(\n        string=\"Raw data\",\n        readonly=True\n    )\n\n    reference_auto = fields.Char(\n        string=\"Automatic reference\",\n        readonly=True\n    )\n\n    dupe = fields.Selection(\n        string=\"DUPE\",\n        selection=SELECTION_DUPE,\n        readonly=True,\n        required=True,\n        default=\"normal\"\n    )\n\n    rst_tx = fields.Char(\n        string=\"RST TX\",\n        compute=\"compute_rst\",\n        readonly=True\n    )\n\n    rst_rx = fields.Char(\n        string=\"RST RX\",\n        compute=\"compute_rst\",\n        readonly=True\n    )\n\n    country_id = fields.Many2one(\n        string=\"Country\",\n        comodel_name=\"hamutility.country\",\n        compute=\"compute_country\",\n        readonly=True,\n        store=True\n    )\n\n    ts_date = fields.Date(\n        string=\"Date\",\n        compute=\"compute_ts_date_time\",\n        readonly=True,\n        store=True\n    )\n\n    ts_time = fields.Char(\n        string=\"Time\",\n        compute=\"compute_ts_date_time\",\n        readonly=True,\n        store=True\n    )\n\n    @api.depends(\"mode\")\n    def compute_rst(self):\n        for rec in self:\n            rst = rec.mode in [\"CW\", \"DIGI\"] and \"599\" or \"59\"\n            rec.rst_tx = rst\n            rec.rst_rx = rst\n\n    @api.depends(\"callsign\")\n    def compute_country(self):\n        utility_callsign_obj = self.env[\"hamutility.utility_callsign\"]\n\n        for rec in self:\n            if rec and rec.callsign:\n                rec.country_id = utility_callsign_obj.get_country(rec.callsign)\n\n    @api.depends(\"ts\")\n    def compute_ts_date_time(self):\n        for rec in self:\n            rec.ts_date = rec.ts.strftime(\"%Y-%m-%d\")\n            rec.ts_time = rec.ts.strftime(\"%H%M%S\")\n\n    @api.model\n    def action_update_reference_auto(self):\n        _logger.info(\"Updating Auto Reference\")\n\n        armi_obj = self.env[\"awards_naval.armi\"]\n        qso_obj = self.env[\"award_naval.qso\"]\n\n        armi_ids = armi_obj.search([])\n        _logger.info(\"ARMI records count: %d\" % len(armi_ids))\n\n        qso_ids = qso_obj.search([])\n        qso_count = len(qso_ids)\n        _logger.info(\"QSO count: %d\" % qso_count)\n\n        count = 0\n        for qso_id in qso_ids:\n            qso_callsign = qso_id.callsign.upper()\n\n            for armi_id in armi_ids:\n                armi_callsign = armi_id.callsign.upper()\n\n                if armi_callsign in qso_callsign:\n                    reference = armi_id.reference\n                    qso_id.reference_auto = reference\n                    _logger.info(\"Found %s for %s\" % (reference, qso_callsign))\n                    count += 1\n                    continue\n\n        _logger.info(\"Registered %d references in %d QSO\" % (count, qso_count))\n\n    @api.model\n    def action_update_missing_reference(self):\n        _logger.info(\"Updating Missing Reference\")\n\n        qso_obj = self.env[\"award_naval.qso\"]\n\n        qso_ids = qso_obj.search([\n            (\"reference\", \"!=\", 
None)\n ])\n\n ref_dict = {}\n for qso_id in qso_ids:\n ref_dict[qso_id.callsign] = qso_id.reference\n\n for callsign, reference in ref_dict.items():\n qso_ids = qso_obj.search([\n (\"callsign\", \"ilike\", callsign),\n \"|\",\n (\"reference\", \"=\", False),\n (\"reference\", \"=\", \"\")\n ])\n\n qso_ids.write({\n \"reference\": reference\n })\n\n @api.model\n def action_compute_dupe(self):\n qso_ids = self.search([])\n qso_ids.write({\"dupe\": \"normal\"})\n\n qso_first = self.search([], limit=1, order=\"ts ASC\")\n qso_last = self.search([], limit=1, order=\"ts DESC\")\n\n datetime_first = qso_first.ts.replace(hour=0, minute=0, second=0, microsecond=0)\n datetime_last = qso_last.ts.replace(hour=23, minute=59, second=59, microsecond=999999)\n\n datetime_interval = datetime_last - datetime_first\n days = datetime_interval.days + 1\n\n qso_ids_group = self.read_group(domain=[], fields=[\"callsign\"], groupby=[\"callsign\"])\n callsign_list = [x[\"callsign\"] for x in qso_ids_group if x[\"callsign\"]]\n\n for callsign in callsign_list:\n for i in range(0, days):\n datetime_day = datetime_first + datetime.timedelta(days=i)\n\n self.check_dupe(callsign, datetime_day, \"CW\")\n self.check_dupe(callsign, datetime_day, \"SSB\")\n self.check_dupe(callsign, datetime_day, \"DIGI\")\n\n def check_dupe(self, callsign, datetime_day, mode):\n ts_start = datetime_day.strftime(\"%Y-%m-%d 00:00:00\")\n ts_end = datetime_day.strftime(\"%Y-%m-%d 23:59:59\")\n\n qso_ids = self.search([\n (\"callsign\", \"=\", callsign),\n (\"mode\", \"=\", mode),\n (\"ts\", \">=\", ts_start),\n (\"ts\", \"<=\", ts_end),\n ], order=\"ts ASC\")\n\n if len(qso_ids) > 1:\n _logger.info(\"DUPE for %s on %s in %s with %d QSO\" % (\n callsign, datetime_day.strftime(\"%Y-%m-%d\"), mode, len(qso_ids)\n ))\n\n count = 0\n for qso_id in qso_ids:\n qso_id.dupe = \"dupe\" if count > 0 else \"normal\"\n count += 1\n", "repo_name": "sardylan/stationlog", "sub_path": "awards/awards_naval/models/qso.py", "file_name": "qso.py", "file_ext": "py", "file_size_in_byte": 6259, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "73", "api": [{"api_name": "logging.getLogger", "line_number": 29, "usage_type": "call"}, {"api_name": "odoo.models.Model", "line_number": 32, "usage_type": "attribute"}, {"api_name": "odoo.models", "line_number": 32, "usage_type": "name"}, {"api_name": "odoo.fields.Char", "line_number": 36, "usage_type": "call"}, {"api_name": "odoo.fields", "line_number": 36, "usage_type": "name"}, {"api_name": "odoo.fields.Datetime", "line_number": 41, "usage_type": "call"}, {"api_name": "odoo.fields", "line_number": 41, "usage_type": "name"}, {"api_name": "odoo.fields.Selection", "line_number": 46, "usage_type": "call"}, {"api_name": "odoo.fields", "line_number": 46, "usage_type": "name"}, {"api_name": "odoo.fields.Selection", "line_number": 52, "usage_type": "call"}, {"api_name": "odoo.fields", "line_number": 52, "usage_type": "name"}, {"api_name": "odoo.fields.Char", "line_number": 58, "usage_type": "call"}, {"api_name": "odoo.fields", "line_number": 58, "usage_type": "name"}, {"api_name": "odoo.fields.Char", "line_number": 62, "usage_type": "call"}, {"api_name": "odoo.fields", "line_number": 62, "usage_type": "name"}, {"api_name": "odoo.fields.Char", "line_number": 67, "usage_type": "call"}, {"api_name": "odoo.fields", "line_number": 67, "usage_type": "name"}, {"api_name": "odoo.fields.Char", "line_number": 72, "usage_type": "call"}, {"api_name": "odoo.fields", "line_number": 72, 
"usage_type": "name"}, {"api_name": "odoo.fields.Selection", "line_number": 77, "usage_type": "call"}, {"api_name": "odoo.fields", "line_number": 77, "usage_type": "name"}, {"api_name": "odoo.fields.Char", "line_number": 85, "usage_type": "call"}, {"api_name": "odoo.fields", "line_number": 85, "usage_type": "name"}, {"api_name": "odoo.fields.Char", "line_number": 91, "usage_type": "call"}, {"api_name": "odoo.fields", "line_number": 91, "usage_type": "name"}, {"api_name": "odoo.fields.Many2one", "line_number": 97, "usage_type": "call"}, {"api_name": "odoo.fields", "line_number": 97, "usage_type": "name"}, {"api_name": "odoo.fields.Date", "line_number": 105, "usage_type": "call"}, {"api_name": "odoo.fields", "line_number": 105, "usage_type": "name"}, {"api_name": "odoo.fields.Char", "line_number": 112, "usage_type": "call"}, {"api_name": "odoo.fields", "line_number": 112, "usage_type": "name"}, {"api_name": "odoo.api.depends", "line_number": 119, "usage_type": "call"}, {"api_name": "odoo.api", "line_number": 119, "usage_type": "name"}, {"api_name": "odoo.api.depends", "line_number": 126, "usage_type": "call"}, {"api_name": "odoo.api", "line_number": 126, "usage_type": "name"}, {"api_name": "odoo.api.depends", "line_number": 134, "usage_type": "call"}, {"api_name": "odoo.api", "line_number": 134, "usage_type": "name"}, {"api_name": "odoo.api.model", "line_number": 140, "usage_type": "attribute"}, {"api_name": "odoo.api", "line_number": 140, "usage_type": "name"}, {"api_name": "odoo.api.model", "line_number": 170, "usage_type": "attribute"}, {"api_name": "odoo.api", "line_number": 170, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 215, "usage_type": "call"}, {"api_name": "odoo.api.model", "line_number": 196, "usage_type": "attribute"}, {"api_name": "odoo.api", "line_number": 196, "usage_type": "name"}]}
+{"seq_id": "5465651530", "text": "import pymysql\n\nfrom classes_rod.classes_round_of_drinks import Person, Drink, Order\n\npeople_list = []\ndrinks_list = []\npast_orders = []\n\ndef load_db():\n data = read_db(\"people\")\n for line in data:\n people_list.append(Person(line[0], line[1], line[2], line[3], line[4]))\n data = read_db(\"drinks\")\n for line in data:\n drinks_list.append(Drink(line[0], line[1], line[2], line[3]))\n data = read_db(\"past_orders\")\n for line in data:\n past_orders.append(Order(line[0], line[1], line[2], line[3]))\n return people_list, drinks_list, past_orders\n\ndef connect_to_db():\n connection = pymysql.connect(\n host = \"localhost\",\n port = 3306,\n user = \"root\",\n password = \"password\",\n db = \"brew\",\n charset='utf8mb4',\n )\n return connection\n\ndef read_db(table_name):\n connection = connect_to_db()\n\n data = []\n cursor = connection.cursor()\n cursor.execute(f\"SELECT * FROM {table_name}\")\n data = cursor.fetchall()\n cursor.close()\n connection.close()\n return data\n\ndef write_db(table_name):\n connection = connect_to_db()\n\n cursor = connection.cursor()\n cursor.execute(f\"DELETE FROM {table_name}\")\n if \"people\" in table_name:\n for person in people_list:\n cursor.execute(f\"INSERT INTO people () values (NULL, '{person.first_name}', '{person.last_name}', '{person.age}', '{person.race}')\")\n elif \"drink\" in table_name:\n for drink in drinks_list:\n cursor.execute(f\"INSERT INTO drinks () values (NULL, '{drink.name}', '{drink.price}', '{drink.is_mixer}')\")\n else:\n for order in past_orders:\n cursor.execute(f\"\"\"INSERT INTO past_orders () values (NULL, '{order.person}', \"{order.people_drinks}\", '{order.cost}')\"\"\")\n\n connection.commit()\n cursor.close()\n connection.close()", "repo_name": "HarryAnkcorn/my_brew_app", "sub_path": "db_handling_rod/db_handling_round_of_drinks.py", "file_name": "db_handling_round_of_drinks.py", "file_ext": "py", "file_size_in_byte": 1819, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "73", "api": [{"api_name": "classes_rod.classes_round_of_drinks.Person", "line_number": 12, "usage_type": "call"}, {"api_name": "classes_rod.classes_round_of_drinks.Drink", "line_number": 15, "usage_type": "call"}, {"api_name": "classes_rod.classes_round_of_drinks.Order", "line_number": 18, "usage_type": "call"}, {"api_name": "pymysql.connect", "line_number": 22, "usage_type": "call"}]}
+{"seq_id": "7395629687", "text": "import cv2\nimport matplotlib\nimport numpy as np\nimport sys\nimport tensorflow as tf\nfrom distutils.version import StrictVersion\nfrom googletrans import Translator\nimport os\nfrom flask import Flask, render_template\nfrom werkzeug.utils import secure_filename\nfrom flask import request\n\napp = Flask(__name__)\nsys.path.append(\"..\")\n\ndef find_tag(dict):\n    if StrictVersion(tf.__version__) < StrictVersion('1.12.0'):\n        raise ImportError('Please upgrade your TensorFlow installation to v1.12.*.')\n    from utils import label_map_util\n    matplotlib.use('tkagg')\n\n    MODEL_NAME = 'ssd_mobilenet_v1_coco_2017_11_17'\n    MODEL_FILE = MODEL_NAME + '.tar.gz'\n    DOWNLOAD_BASE = 'http://download.tensorflow.org/models/object_detection/'\n\n    PATH_TO_FROZEN_GRAPH = MODEL_NAME + '/frozen_inference_graph.pb'\n    PATH_TO_LABELS = os.path.join('data', 'mscoco_label_map.pbtxt')\n    PATH_TO_IMAGE = os.path.join(dict)\n\n    detection_graph = tf.Graph()\n    with detection_graph.as_default():\n        od_graph_def = tf.GraphDef()\n        with tf.gfile.GFile(PATH_TO_FROZEN_GRAPH, 'rb') as fid:\n            serialized_graph = fid.read()\n            od_graph_def.ParseFromString(serialized_graph)\n            tf.import_graph_def(od_graph_def, name='')\n\n\n    category_index = label_map_util.create_category_index_from_labelmap(PATH_TO_LABELS, use_display_name=True)\n\n    detection_graph = tf.Graph()\n    with detection_graph.as_default():\n        od_graph_def = tf.GraphDef()\n        with tf.gfile.GFile(PATH_TO_FROZEN_GRAPH, 'rb') as fid:\n            serialized_graph = fid.read()\n            od_graph_def.ParseFromString(serialized_graph)\n            tf.import_graph_def(od_graph_def, name='')\n\n    sess = tf.Session(graph=detection_graph)\n\n    image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')\n    detection_boxes = detection_graph.get_tensor_by_name('detection_boxes:0')\n    detection_scores = detection_graph.get_tensor_by_name('detection_scores:0')\n    detection_classes = detection_graph.get_tensor_by_name('detection_classes:0')\n    num_detections = detection_graph.get_tensor_by_name('num_detections:0')\n\n    image = cv2.imread(PATH_TO_IMAGE)\n    image_expanded = np.expand_dims(image, axis=0)\n\n    (boxes, scores, classes, num) = sess.run(\n        [detection_boxes, detection_scores, detection_classes, num_detections],\n        feed_dict={image_tensor: image_expanded})\n\n    objects = []\n    threshold = 0.00\n\n    for index, value in enumerate(classes[0]):\n        object_dict = {}\n        if scores[0, index] > threshold:\n            object_dict[\"score\"] = \\\n                scores[0, index]\n            object_dict[\"name\"] = \\\n                (category_index.get(value)).get('name')\n            objects.append(object_dict)\n\n    rate = 0\n    category = ''\n    for object in objects:\n        if object[\"score\"] > rate:\n            rate = object[\"score\"]\n            category = object[\"name\"]\n\n    return category\n\nclass Result:\n    tr = \"\"\n    fr = \"\"\n    de = \"\"\n    es = \"\"\n    en = \"\"\n\n\ndef translate(tag):\n    result = Result()\n    translator = Translator()\n    result.en = tag\n    result.tr = translator.translate(tag, dest='tr').text\n    result.fr = translator.translate(tag, dest='fr').text\n    result.de = translator.translate(tag, dest='de').text\n    result.es = translator.translate(tag, dest='es').text\n    return result\n\n@app.route('/')\ndef upload_file1():\n    return render_template('upload.html')\n\n@app.route('/uploader', methods=['GET', 'POST'])\ndef upload_file():\n    f = request.files['file']\n    f.save(\"pictures/\" + secure_filename(f.filename))\n    dict = \"pictures/\" + secure_filename(f.filename)\n    tag = find_tag(dict)\n    if tag != \"\":\n        html = \"BK Visual Dictionary\"\n        result = translate(tag)\n        body = \"| English: \" + result.en + \" |<br>| Spanish: \"+ result.es + \" |<br>| German: \" + result.de + \" |<br>| French: \" + result.fr + \" |<br>| Turkish: \" + result.tr + \" |
\"\n html = html + body + \"\"\n return html\n else:\n return \"No object detected\"\n\nif __name__ == '__main__':\n app.run()\n\n\n", "repo_name": "nburak/Visual-Dictionary-Implementation-of-Tensorflow-Object-Detection-API", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 4202, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "73", "api": [{"api_name": "flask.Flask", "line_number": 13, "usage_type": "call"}, {"api_name": "sys.path.append", "line_number": 14, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 14, "usage_type": "attribute"}, {"api_name": "distutils.version.StrictVersion", "line_number": 17, "usage_type": "call"}, {"api_name": "tensorflow.__version__", "line_number": 17, "usage_type": "attribute"}, {"api_name": "matplotlib.use", "line_number": 20, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 27, "usage_type": "call"}, {"api_name": "os.path", "line_number": 27, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 28, "usage_type": "call"}, {"api_name": "os.path", "line_number": 28, "usage_type": "attribute"}, {"api_name": "tensorflow.Graph", "line_number": 30, "usage_type": "call"}, {"api_name": "tensorflow.GraphDef", "line_number": 32, "usage_type": "call"}, {"api_name": "tensorflow.gfile.GFile", "line_number": 33, "usage_type": "call"}, {"api_name": "tensorflow.gfile", "line_number": 33, "usage_type": "attribute"}, {"api_name": "tensorflow.import_graph_def", "line_number": 36, "usage_type": "call"}, {"api_name": "utils.label_map_util.create_category_index_from_labelmap", "line_number": 39, "usage_type": "call"}, {"api_name": "utils.label_map_util", "line_number": 39, "usage_type": "name"}, {"api_name": "tensorflow.Graph", "line_number": 41, "usage_type": "call"}, {"api_name": "tensorflow.GraphDef", "line_number": 43, "usage_type": "call"}, {"api_name": "tensorflow.gfile.GFile", "line_number": 44, "usage_type": "call"}, {"api_name": "tensorflow.gfile", "line_number": 44, "usage_type": "attribute"}, {"api_name": "tensorflow.import_graph_def", "line_number": 47, "usage_type": "call"}, {"api_name": "tensorflow.Session", "line_number": 49, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 57, "usage_type": "call"}, {"api_name": "numpy.expand_dims", "line_number": 58, "usage_type": "call"}, {"api_name": "googletrans.Translator", "line_number": 95, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 105, "usage_type": "call"}, {"api_name": "flask.request.files", "line_number": 109, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 109, "usage_type": "name"}, {"api_name": "werkzeug.utils.secure_filename", "line_number": 110, "usage_type": "call"}, {"api_name": "werkzeug.utils.secure_filename", "line_number": 111, "usage_type": "call"}]}
+{"seq_id": "35658068884", "text": "\"\"\" required imports for module functionality \"\"\"\nfrom django.urls import path\nfrom . import views\n\nurlpatterns = [\n    path('', views.all_workouts, name='all_workouts'),\n    path('workout_detail/<slug:slug>/', views.workout_detail, name='workout_detail'),\n    path('add_workout/', views.add_workout, name='add_workout'),\n    path('edit_workout/<slug:slug>/', views.edit_workout, name='edit_workout'),\n    path('delete_workout/<slug:slug>/', views.delete_workout, name='delete_workout'),\n]\n", "repo_name": "RussOakham/Immortal-Fitness-Collective", "sub_path": "workout_blog/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 490, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "73", "api": [{"api_name": "django.urls.path", "line_number": 6, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 7, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 8, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 9, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 10, "usage_type": "call"}]}
+{"seq_id": "5413062316", "text": "# import os\nimport time\nfrom multiprocessing.dummy import Pool as ThreadPool\nfrom multiprocessing import Process, Pipe, Manager\nfrom flask import Flask, request\nfrom flask_classy import FlaskView, route\nimport subprocess\nimport threading\nimport json\n\n# --- This is a nice implementation of a simple timer I found online -DW --- #\n_tm = 0\n\n\ndef stopwatch(msg=''):\n tm = time.time()\n global _tm\n if _tm == 0:\n _tm = tm\n return\n print(\"%s: %.2f ms\" % (msg, 1000.0 * (tm - _tm)))\n _tm = tm\n# ------------------------------------------------------------------------- #\n\n\ndef serial_watchdog(com_pipe, debug):\n \"\"\"\n Function to be called as a process. Watches the serial ports and looks for devices plugged in\n or removed.\n Underscore at beginning prevents flask_classy from making it a route in the Flask server.\n :param com_pipe: this end of the pipe to communicate with main server process.\n :param debug: Flag whether or not debugging should be turned on.\n :return:\n \"\"\"\n _keep_communicating2 = True\n _com_freq = 2.0 # (Hz)\n _com_period = 1.0 / _com_freq # (s)\n _debug = debug\n\n _current_ports_by_ids = {}\n _new_ports_by_ids = {}\n\n while _keep_communicating2:\n\n # Do the timing of this process:\n _thread_start_time = time.time()\n\n if com_pipe.poll():\n\n _in_message = com_pipe.recv()\n\n if _in_message[0] == \"com_period\":\n _com_period = _in_message[1]\n\n if _in_message[0] == \"shutdown\":\n break\n\n # Find the serial ports and whether they belong to an arduino\n # TODO: This takes a very long time, is there a faster way?\n proc = subprocess.Popen('/home/mist-1/Work/ControlSystem/usb.sh',\n stdout=subprocess.PIPE, shell=True)\n\n output = proc.stdout.read().strip()\n\n _device_added = False\n _device_removed = False\n _device_ids = _current_ports_by_ids.keys()\n\n # Loop through all found devices and add them to a new list, remove them from the current list\n for line in output.split(\"\\n\"):\n\n if \"Arduino\" in line:\n\n port, raw_info = line.split(\" - \")\n serial_number = raw_info.split(\"_\")[-1]\n\n _new_ports_by_ids[serial_number] = port\n\n if serial_number not in _device_ids:\n _device_added = True\n else:\n del _current_ports_by_ids[serial_number]\n\n # Now, let's check if there are any devices still left in the current dict\n if len(_current_ports_by_ids) > 0:\n _device_removed = True\n\n _current_ports_by_ids = _new_ports_by_ids\n _new_ports_by_ids = {}\n\n if _debug:\n if _device_removed:\n print(\"Arduino(s) were removed\")\n if _device_added:\n print(\"Arduino(s) were added\")\n\n if _device_added or _device_removed:\n\n if _debug:\n print(\"Updated List:\")\n for key, item in _current_ports_by_ids.items():\n print (\"Arduino {} at port {}\".format(key, item))\n\n # Reverse ports_by_ids:\n _current_ids_by_ports = {}\n for myid, myport in _current_ports_by_ids.items():\n _current_ids_by_ports[myport] = myid\n\n pipe_message = [\"updated_list\", _current_ports_by_ids, _current_ids_by_ports]\n com_pipe.send(pipe_message)\n # _ports_by_ids = _current_ports_by_ids\n # _ids_by_ports = _current_ids_by_ports\n\n # Do the timing of this process:\n _sleepy_time = _com_period - time.time() + _thread_start_time\n\n if _sleepy_time > 0.0:\n\n if _debug:\n print(\"Watchdog alive, sleeping for {} s.\".format(_sleepy_time))\n\n time.sleep(_sleepy_time)\n\n\nclass ServerView(FlaskView):\n def __init__(self):\n self._debug = False\n\n # A pool of threads to communicate with the arduinos\n self._threadpool = 
ThreadPool(10)\n\n self._welcome_message = \"Hi, this is the MIST-1 Control System server running on a RasPi.\"\n\n self._pipe_server, pipe_serial_watcher = Pipe()\n\n # self._manager = Manager()\n # self._ports_by_ids = self._manager.dict()\n # self._ids_by_ports = self._manager.dict()\n\n self._watch_proc = Process(target=serial_watchdog, args=(pipe_serial_watcher,\n self._debug,))\n self._watch_proc.daemon = True\n\n # self._watchdog_thread = None\n\n self._keep_communicating = False\n self._initialized = False\n\n # TODO: Add another Pipe/Process combo for displaying stuff on our new display\n\n def kill(self):\n \"\"\"\n Shutdown routine.\n :return:\n \"\"\"\n print(\"Shutting Down!\")\n\n self._keep_communicating = False\n\n self._threadpool.terminate()\n self._threadpool.join()\n\n if self._watch_proc.is_alive():\n\n self._watch_proc.terminate()\n self._watch_proc.join()\n\n else:\n\n print(\"Watchdog already dead.\")\n\n func = request.environ.get('werkzeug.server.shutdown')\n\n if func is None:\n\n raise RuntimeError('Not running with the Werkzeug Server')\n\n func()\n\n return \"Shutting down...\"\n\n @route(\"/device/all/\")\n def all_devices(self):\n\n return json.dumps(self._ports_by_ids.copy())\n\n def initialize(self):\n\n if self._initialized:\n\n return \"Server has already been initialized\"\n\n else:\n\n self._keep_communicating = True\n\n threading.Timer(0.1, self._listen_to_pipe).start()\n # self._watchdog_thread = threading.Thread(target=self._listen_to_pipe)\n # self._watchdog_thread.start()\n\n time.sleep(0.2)\n\n response = self._start_watchdog()\n\n self._initialized = True\n\n return \"Initializing Control System RasPi server services...{}\".format(response)\n\n def hello(self):\n\n return self._welcome_message\n\n def _start_watchdog(self):\n\n if not self._watch_proc.is_alive():\n\n self._watch_proc.start()\n\n return \"Started the watchdog process.\"\n\n else:\n\n return \"There was already a watchdog process running!\"\n\n def _listen_to_pipe(self):\n\n if self._pipe_server.poll():\n\n gui_message = self._pipe_server.recv()\n\n if gui_message[0] == \"updated_list\":\n\n if self._debug:\n print(\"Updating ports/ids in main server\")\n\n self._ports_by_ids = gui_message[1]\n self._ids_by_ports = gui_message[2]\n\n print(self._ports_by_ids)\n print(self._ids_by_ports)\n\n if self._keep_communicating:\n threading.Timer(1.0, self._listen_to_pipe).start()\n\n\nif __name__ == \"__main__\":\n\n app = Flask(__name__)\n # server = ServerView()\n ServerView.register(app)\n app.run(host='0.0.0.0', port=5000)\n", "repo_name": "DanielWinklehner/Ion-Source-Control-System", "sub_path": "Software/RasPiServer_v2/RasPiServer_v2.py", "file_name": "RasPiServer_v2.py", "file_ext": "py", "file_size_in_byte": 7116, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "85", "api": [{"api_name": "time.time", "line_number": 16, "usage_type": "call"}, {"api_name": "time.time", "line_number": 46, "usage_type": "call"}, {"api_name": "subprocess.Popen", "line_number": 60, "usage_type": "call"}, {"api_name": "subprocess.PIPE", "line_number": 61, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 115, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 122, "usage_type": "call"}, {"api_name": "flask_classy.FlaskView", "line_number": 125, "usage_type": "name"}, {"api_name": "multiprocessing.dummy.Pool", "line_number": 130, "usage_type": "call"}, {"api_name": "multiprocessing.Pipe", "line_number": 134, 
"usage_type": "call"}, {"api_name": "multiprocessing.Process", "line_number": 140, "usage_type": "call"}, {"api_name": "flask.request.environ.get", "line_number": 172, "usage_type": "call"}, {"api_name": "flask.request.environ", "line_number": 172, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 172, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 185, "usage_type": "call"}, {"api_name": "flask_classy.route", "line_number": 182, "usage_type": "call"}, {"api_name": "threading.Timer", "line_number": 197, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 201, "usage_type": "call"}, {"api_name": "threading.Timer", "line_number": 243, "usage_type": "call"}, {"api_name": "flask.Flask", "line_number": 248, "usage_type": "call"}]}
+{"seq_id": "21874183464", "text": "''' Telegram '''\nimport logging\n\nimport arrow\nfrom flask import Blueprint, g, redirect, request, url_for\nfrom flask.wrappers import Response\nfrom werkzeug.wrappers import Response as ResponseBase\n\nimport setting\nfrom models.telegram_db import TelegramDB\nfrom module.mc import MC\nfrom module.telegram_bot import TelegramBot\n\nVIEW_TELEGRAM = Blueprint('telegram', __name__, url_prefix='/telegram')\n\n\n@VIEW_TELEGRAM.route('/r', methods=('POST', ))\ndef receive() -> ResponseBase:\n ''' receive '''\n data = request.get_json()\n logging.info('[telegram] %s', data)\n\n if data and TelegramBot.is_command_start_linkme(data):\n uuid_data = TelegramBot.gen_uuid(chat_id=data['message']['from']['id'])\n TelegramBot.temp_fetch_user_data(data=data)\n\n resp = TelegramBot(token=setting.TELEGRAM_TOKEN).send_message(\n chat_id=data['message']['from']['id'],\n text='請繼續前往志工平台登入驗證,感謝!',\n reply_markup={\n 'inline_keyboard': [\n [{'text': '驗證(verify)',\n 'url': f\"https://{setting.DOMAIN}/telegram/verify/{uuid_data['uuid']}\"}, ],\n ]},\n )\n\n logging.info('[Telegram][Send] %s', resp.json())\n\n return Response('', status=200)\n\n\n@VIEW_TELEGRAM.route('/verify/', methods=('GET', 'POST'))\ndef link_telegram_verify(tg_uuid: str) -> ResponseBase:\n ''' Link Telegram verify '''\n if request.method == 'GET':\n mem_cache = MC.get_client()\n data = mem_cache.get(f'tg:{tg_uuid}')\n if not data:\n return Response('Expired. `/linkme` again', status=406)\n\n user_data = mem_cache.get(f\"tgu:{data['chat_id']}\")\n if data and user_data:\n save_data = {'uid': g.user['account']\n ['_id'], 'added': arrow.now().naive}\n save_data.update(user_data)\n TelegramDB().add(save_data)\n\n TelegramBot(token=setting.TELEGRAM_TOKEN).send_message(\n chat_id=save_data['id'],\n text='與 [%(uid)s](https://volunteer.coscup.org/user/%(uid)s) 完成帳號綁定!(Completed)' % save_data) # pylint: disable=line-too-long\n\n mem_cache.delete_multi(\n [f'tg:{tg_uuid}', f\"tgu:{g.user['account']['_id']}\"])\n\n logging.info('[Telegram][Send] linkme: %s %s',\n save_data['id'], save_data['uid'])\n\n return redirect(url_for('setting.link_telegram', _scheme='https', _external=True))\n\n return Response('Expired. 
`/linkme` again', status=406)\n\n return Response('', status=404)\n", "repo_name": "COSCUP/COSCUP-Volunteer", "sub_path": "view/telegram.py", "file_name": "telegram.py", "file_ext": "py", "file_size_in_byte": 2635, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 56, "dataset": "github-code", "pt": "73", "api": [{"api_name": "flask.Blueprint", "line_number": 14, "usage_type": "call"}, {"api_name": "flask.request.get_json", "line_number": 20, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 20, "usage_type": "name"}, {"api_name": "logging.info", "line_number": 21, "usage_type": "call"}, {"api_name": "module.telegram_bot.TelegramBot.is_command_start_linkme", "line_number": 23, "usage_type": "call"}, {"api_name": "module.telegram_bot.TelegramBot", "line_number": 23, "usage_type": "name"}, {"api_name": "module.telegram_bot.TelegramBot.gen_uuid", "line_number": 24, "usage_type": "call"}, {"api_name": "module.telegram_bot.TelegramBot", "line_number": 24, "usage_type": "name"}, {"api_name": "module.telegram_bot.TelegramBot.temp_fetch_user_data", "line_number": 25, "usage_type": "call"}, {"api_name": "module.telegram_bot.TelegramBot", "line_number": 25, "usage_type": "name"}, {"api_name": "module.telegram_bot.TelegramBot", "line_number": 27, "usage_type": "call"}, {"api_name": "setting.TELEGRAM_TOKEN", "line_number": 27, "usage_type": "attribute"}, {"api_name": "setting.DOMAIN", "line_number": 33, "usage_type": "attribute"}, {"api_name": "logging.info", "line_number": 37, "usage_type": "call"}, {"api_name": "flask.wrappers.Response", "line_number": 39, "usage_type": "call"}, {"api_name": "werkzeug.wrappers.Response", "line_number": 18, "usage_type": "name"}, {"api_name": "flask.request.method", "line_number": 45, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 45, "usage_type": "name"}, {"api_name": "module.mc.MC.get_client", "line_number": 46, "usage_type": "call"}, {"api_name": "module.mc.MC", "line_number": 46, "usage_type": "name"}, {"api_name": "flask.wrappers.Response", "line_number": 49, "usage_type": "call"}, {"api_name": "flask.g.user", "line_number": 53, "usage_type": "attribute"}, {"api_name": "flask.g", "line_number": 53, "usage_type": "name"}, {"api_name": "arrow.now", "line_number": 54, "usage_type": "call"}, {"api_name": "models.telegram_db.TelegramDB", "line_number": 56, "usage_type": "call"}, {"api_name": "module.telegram_bot.TelegramBot", "line_number": 58, "usage_type": "call"}, {"api_name": "setting.TELEGRAM_TOKEN", "line_number": 58, "usage_type": "attribute"}, {"api_name": "flask.g.user", "line_number": 63, "usage_type": "attribute"}, {"api_name": "flask.g", "line_number": 63, "usage_type": "name"}, {"api_name": "logging.info", "line_number": 65, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 68, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 68, "usage_type": "call"}, {"api_name": "flask.wrappers.Response", "line_number": 70, "usage_type": "call"}, {"api_name": "flask.wrappers.Response", "line_number": 72, "usage_type": "call"}, {"api_name": "werkzeug.wrappers.Response", "line_number": 43, "usage_type": "name"}]}
+{"seq_id": "73664291117", "text": "import torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport numpy as np\n\nfrom torch.utils.data import Dataset\nfrom sklearn.metrics import classification_report\nfrom torch.utils.tensorboard import SummaryWriter\n\nclass DisambiguationModel(nn.Module):\n def __init__(self, dim, layer_sizes, drop_outs):\n super(DisambiguationModel, self).__init__()\n self.lin_layers = nn.ModuleList([nn.Linear(dim, layer_sizes[0])])\n for i in range(len(layer_sizes)-1):\n self.lin_layers.append(nn.ReLU())\n self.lin_layers.append(nn.Dropout(drop_outs[i]))\n self.lin_layers.append(nn.Linear(layer_sizes[i], layer_sizes[i+1]))\n\n def forward(self, x):\n for l in self.lin_layers:\n x = l(x)\n return x\n\nclass ModelWrapper():\n def __init__(self, config, input_dim, save_path=None, device='cpu'):\n self.config = config\n self.device = device\n self.writer = SummaryWriter('{}/log'.format(save_path))\n self.model = DisambiguationModel(input_dim, config['layer_sizes'], config['drop_outs']).to(device)\n self.loss_fn = nn.BCEWithLogitsLoss().to(device)\n self.optim = optim.Adam(self.model.parameters())\n self.epoch = 0\n self.save_path = save_path\n\n def train(self, train_set):\n self.model.train()\n cum_loss = 0\n for idx, sample in enumerate(train_set):\n self.optim.zero_grad()\n pred = self.model(sample[0].to(self.device))\n loss = self.loss_fn(torch.squeeze(pred), sample[1].float().to(self.device))\n loss.backward()\n self.optim.step()\n cum_loss += loss.detach()\n if idx != 0 and idx % 4000 == 0:\n print(\"Batch {} Avg. Loss {}\".format(idx, cum_loss / 100)) \n cum_loss = 0\n\n def test(self, test_set):\n self.model.eval()\n predictions = []\n true = []\n for idx, sample in enumerate(test_set):\n with torch.no_grad():\n pred = self.model(sample[0].to(self.device))\n pred_class = torch.squeeze(torch.sigmoid(pred))\n predictions.extend(pred_class.cpu().numpy())\n true.extend(sample[1])\n if idx != 0 and idx % 4000 == 0:\n print(\"At batch {}\".format(idx))\n return true, predictions\n\n def predict(self, test_set='train', test_set_ext='train'):\n self.model.eval()\n predictions = []\n for idx, sample in enumerate(self.inputs[test_set][test_set_ext]['loader']):\n with torch.no_grad():\n pred = self.model(sample[0])\n pred_class = torch.squeeze(torch.sigmoid(pred))\n predictions.extend(pred_class.cpu().numpy())\n return predictions\n\n def eval(self, true, predictions, threshold=.5, epoch=0, write=True):\n predictions = [0 if pred <= threshold else 1 for pred in predictions]\n print(classification_report(true, predictions))\n classification_dict = classification_report(true, predictions, output_dict=True)\n if write and 'True' in classification_dict:\n for k,v in classification_dict['True'].items():\n self.writer.add_scalar(k, v, epoch)\n\n def save(self):\n print(\"Saving model\")\n torch.save({\n 'epoch': self.epoch,\n 'model_state_dict': self.model.state_dict(),\n 'optimizer_state_dict': self.optim.state_dict(),\n }, '{}/model.pth'.format(self.save_path))\n\n def load(self):\n if 'checkpoint' in self.config and self.config['checkpoint']:\n print(\"Loading model from checkpoint\")\n checkpoint_data = torch.load(self.config['checkpoint'], map_location=self.device)\n self.model.load_state_dict(checkpoint_data['model_state_dict'])\n self.optim.load_state_dict(checkpoint_data['optimizer_state_dict'])\n self.epoch = checkpoint_data['epoch'] \n\n", "repo_name": "dave-s477/SoMeNLP", "sub_path": "somenlp/entity_disambiguation/model.py", "file_name": "model.py", 
"file_ext": "py", "file_size_in_byte": 3945, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 4, "dataset": "github-code", "pt": "73", "api": [{"api_name": "torch.nn.Module", "line_number": 10, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 10, "usage_type": "name"}, {"api_name": "torch.nn.ModuleList", "line_number": 13, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 13, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 13, "usage_type": "call"}, {"api_name": "torch.nn.ReLU", "line_number": 15, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 15, "usage_type": "name"}, {"api_name": "torch.nn.Dropout", "line_number": 16, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 16, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 17, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 17, "usage_type": "name"}, {"api_name": "torch.utils.tensorboard.SummaryWriter", "line_number": 28, "usage_type": "call"}, {"api_name": "torch.nn.BCEWithLogitsLoss", "line_number": 30, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 30, "usage_type": "name"}, {"api_name": "torch.optim.Adam", "line_number": 31, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 31, "usage_type": "name"}, {"api_name": "torch.squeeze", "line_number": 41, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 54, "usage_type": "call"}, {"api_name": "torch.squeeze", "line_number": 56, "usage_type": "call"}, {"api_name": "torch.sigmoid", "line_number": 56, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 67, "usage_type": "call"}, {"api_name": "torch.squeeze", "line_number": 69, "usage_type": "call"}, {"api_name": "torch.sigmoid", "line_number": 69, "usage_type": "call"}, {"api_name": "sklearn.metrics.classification_report", "line_number": 75, "usage_type": "call"}, {"api_name": "sklearn.metrics.classification_report", "line_number": 76, "usage_type": "call"}, {"api_name": "torch.save", "line_number": 83, "usage_type": "call"}, {"api_name": "torch.load", "line_number": 92, "usage_type": "call"}]}
+{"seq_id": "74687599637", "text": "'''\nThis is the main script for the different plotting routines. It defines \nthe common style (pyTeX) and imports parent modules\n\nif final is set to True, pgf is used as the backend and a copy is saved\nin the folder img\n'''\n\nfinal = False \n\nfrom pathlib import Path\nimport numpy as np\n\ncwd = Path(__file__).parent\n\n\ndef figsize(scale=1):\n    '''Create nicely proportioned figure\n\n    This function calculates the optimal figuresize for any given scale\n    (the ratio between figuresize and textwidth. A figure with scale 1\n    covers the entire writing area). Therefore it is important to know \n    the textwidth of your target document. This can be obtained by using\n    the command \"\\the\\textwidth\" somewhere inside your document.\n    '''\n\n    width_pt = 355.6595                   # textwidth from latex\n    in_per_pt = 1.0/72.27                 # Convert pt to inch\n    golden = 1.61803398875                # Aesthetic ratio \n    width = width_pt * in_per_pt * scale  # width in inches\n    height = width / golden               # height in inches\n    return [width,height]\n\nimport matplotlib as mpl\n\n\nimport matplotlib.pyplot as plt\n\nif final:\n    plt.style.use(str(cwd / 'TeX.mplstyle'))\n\n# create dictionary for colors\n#names = ['blue','orange','red','cyan','green','yellow','purple','pink','brown','gray']\n#colors = dict(zip(names,plt.rcParams['axes.prop_cycle'].by_key()['color']))\n\ndef newfig(scale=1,ratio=None):\n    '''Create a new figure object\n\n    We use the function figsize to create a figure of corresponding size.\n    If the option ratio is chosen, the width of the plot is still taken\n    from figsize but the ratio of the figure is determined by ratio.\n    '''\n\n    # when using jupyter this is required to close open plots\n    #plt.clf()\n    if not final:\n        scale*=2\n\n    size = figsize(scale)\n    if not ratio:\n        fig = plt.figure(figsize=size)\n    else:\n        fig = plt.figure(figsize=(size[0],ratio*size[0]))\n\n    return fig\n\n\n\n", "repo_name": "fschmnn/pnlf", "sub_path": "src/pnlf/plot/style.py", "file_name": "style.py", "file_ext": "py", "file_size_in_byte": 1983, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "85", "api": [{"api_name": "pathlib.Path", "line_number": 14, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.style.use", "line_number": 40, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.style", "line_number": 40, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 40, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 61, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 61, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 63, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 63, "usage_type": "name"}]}
+{"seq_id": "16754804496", "text": "\r\nimport re\r\nimport json\r\nimport time\r\nimport numpy as np\r\nfrom functools import reduce\r\nfrom collections import OrderedDict\r\nfrom stanfordnlp import Pipeline\r\nfrom nltk.stem import WordNetLemmatizer\r\nfrom nltk.corpus import stopwords\r\n\r\ndef get_token_spans(sentence, words):\r\n\r\n\tspans = []\r\n\tbegin = 0\r\n\tfor w in words:\r\n\t\t# remove all whitespaces\r\n\t\twhile not sentence.startswith(w.text):\r\n\t\t\t# if text[0] != \" \":\r\n\t\t\tassert sentence[0] == \" \"\r\n\t\t\tbegin += 1\r\n\t\t\tsentence = sentence[1:]\r\n\t\t# add span\r\n\t\tspans.append((begin, begin + len(w.text)))\r\n\t\t# update begin and text for next token\r\n\t\tbegin += len(w.text)\r\n\t\tsentence = sentence[len(w.text):]\r\n\r\n\treturn spans\r\n\r\nclass ConceptParser:\r\n\r\n\tdef __init__(self):\r\n\t\t# load stopwords\r\n\t\tself.stop_words = stopwords.words('english')\r\n\t\t# conjunctions to consider\r\n\t\tself.conjugation_tokens = ['and', 'or']\r\n\t\t# create lemmatizer and nlp-pipeline\r\n\t\tself.lemmatizer = WordNetLemmatizer()\r\n\t\tself.tokenizer = Pipeline(lang='en', processors=\"tokenize\")\r\n\t\tself.nlp = Pipeline(lang='en', processors=\"tokenize,pos,depparse\", tokenize_pretokenized=True)\r\n\t\t# map dep-type to function\r\n\t\tself.func_map = {\r\n\t\t\t'nsubj':\tnsubject,\r\n\t\t\t'det':\t\tdet,\r\n\t\t\t'dep':\t\tdep,\r\n\t\t\t'dobj':\t\tdobj,\r\n\t\t\t'acomp':\tacomp,\r\n\t\t\t'amod':\t\tamod,\r\n\t\t\t'aux':\t\taux,\r\n\t\t\t'nn':\t\tnn,\r\n\t\t\t'neg':\t\tneg,\r\n\t\t\t'prep':\t\tprep,\r\n\t\t}\r\n\r\n\tdef remove_PRP_without_NN(self, words, deps):\r\n\t\t# check all dependencies\r\n\t\tfor i, d in zip(range(len(deps)-1, -1, -1), deps[::-1]):\r\n\t\t\t# get part-of-speech tags of dependency-targets\r\n\t\t\tpos_tags = [d[0].pos, d[2].pos]\r\n\t\t\t# check condition\r\n\t\t\tif (\"PRP\" in pos_tags) and (\"NN\" not in pos_tags) and (d[1] != 'nsubj'):\r\n\t\t\t\tdel deps[i]\r\n\t\t# return dependencies\r\n\t\treturn deps\r\n\r\n\tdef process_sentence(self, sentence):\r\n\t\t# get words and dependencies\r\n\t\twords = sentence.words\r\n\t\tdeps = sentence.dependencies\r\n\t\t# remove unnecessary dependencies\r\n\t\tdeps = self.remove_PRP_without_NN(words, deps)\r\n\r\n\t\t# all words except stopwords are concepts\r\n\t\t# this differs from usual idea of concepts\r\n\t\tword_concepts = [(w,) for w in words if w.text not in self.stop_words]\r\n\t\t# get concepts from each depencency\r\n\t\tdep_concepts = (self.func_map[t](w1, w2) for (w1, t, w2) in deps if t in self.func_map)\r\n\t\tdep_concepts = [concept for concept in dep_concepts if concept is not None]\r\n\t\t# get conjugations concepts\r\n\t\tconj_positions = self.conjugation_finder(words)\r\n\t\tconj_concepts = sum(map(lambda i: self.conjugator(words, i), conj_positions), [])\r\n\t\t# get manual concepts\r\n\t\tmunual_concepts = self.manual(words)\r\n\r\n\t\t# throw all together and return\r\n\t\tconcepts = set(word_concepts + dep_concepts + conj_concepts + munual_concepts)\r\n\t\treturn list(concepts)\r\n\r\n\tdef parse(self, sentence):\r\n\t\t# tokenize sentence\r\n\t\tdoc = self.tokenizer(sentence)\r\n\t\tall_words = sum([sent.words for sent in doc.sentences], [])\r\n\t\t# get word spans\r\n\t\tall_word_spans = get_token_spans(sentence, all_words)\r\n\t\t# apply lemmatizer on all words and\r\n\t\t# reconstruct document from tokens such that nlp pipeline \r\n\t\t# recreates the exact sentences and tokens\r\n\t\ttokenized_sentence = '\\n'.join([' 
'.join([self.lemmatizer.lemmatize(w.text) for w in sent.words]) for sent in doc.sentences])\r\n\t\t# apply pipeline\r\n\t\tdoc = self.nlp(tokenized_sentence)\r\n\t\t# apply word-spans to words\r\n\t\tall_words = sum([sent.words for sent in doc.sentences], [])\r\n\t\tassert len(all_words) == len(all_word_spans)\r\n\t\tfor w, (b, e) in zip(all_words, all_word_spans):\r\n\t\t\tw.begin = b\r\n\t\t\tw.end = e\r\n\t\t# process single sentence\r\n\t\treturn self.process_sentence(doc.sentences[0])\r\n\r\n\t# This rule has been created for \"TO\" type postags for relation between objects\r\n\tdef manual(self, words):\r\n\t\tmanual_concepts = []\r\n\r\n\t\tfor i in range(1, len(words) - 1):\r\n\t\t\tword_span = (words[i-1], words[i], words[i+1])\r\n\t\t\tpos_span = words[i-1].pos + words[i].pos + words[i+1].pos\r\n\t\t\t\r\n\t\t\tif pos_span in [\"JJTOVB\", \"JJTOVBD\", \"JJTOVBZ\", \"JJSTOVB\", \"JJSTOVBD\", \"JJSTOVBZ\", \"JJRTOVB\", \"JJRTOVBD\", \"JJRTOVBZ\"]:\r\n\t\t\t\tmanual_concepts.append(word_span)\r\n\t\t\r\n\t\treturn manual_concepts\r\n\r\n\t# This rule has been created for finding the multiple positions of conjugations\r\n\tdef conjugation_finder(self, words):\r\n\t\t# find all conjugations\r\n\t\tocc = sum(([i for i, w in enumerate(words) if w.text == t and w.pos == 'CC'] for t in self.conjugation_tokens), [])\r\n\t\tocc = sorted(occ)\r\n\t\treturn occ\r\n\r\n\t# This rule has been created for \"AND\" types for relation between structures of sentence\r\n\tdef conjugator(self, words, i):\r\n\t\tconcepts = []\r\n\r\n\t\tword1 = i - 1\r\n\t\tword2 = min((j for j, w in enumerate(words[i+1:], start=i+1) if w.pos != 'DT'), default=-1)\r\n\r\n\t\ttarget_words = [word1] + ([word2] if word2 >= 0 else [])\r\n\r\n\t\tif len(target_words) == 2:\r\n\t\t\tconcepts.append((words[word1], words[i], words[word2]))\r\n\t\t# find verb and noun\r\n\t\tverbs = list(filter(lambda i: words[i].pos == 'VB', range(i - 3, i)))\r\n\t\tnouns = list(filter(lambda i: words[i].pos == 'NN', range(i - 3, i)))\r\n\t\t# conjugation with noun\r\n\t\tif len(nouns) > 0:\r\n\t\t\tconcepts.extend(\r\n\t\t\t\t[(words[j], words[nouns[0]]) for j in target_words if j != nouns[0]]\r\n\t\t\t)\r\n\t\t# conjugation with verb\r\n\t\tif len(verbs) > 0:\r\n\t\t\tconcepts.extend(\r\n\t\t\t\t[(words[verbs[0]], words[j]) for j in target_words]\r\n\t\t\t)\r\n\t\t# relations after conjugation\r\n\t\trelations = [\"between\", \"over\", \"with\", \"on\", \"to\", \"of\", \"into\", \"in\", \"at\"]\r\n\t\tfor j, w in enumerate(words[i:], start=i):\r\n\t\t\tif w.text in relations:\r\n\t\t\t\tword3 = j + 1\r\n\t\t\t\tconcepts.extend(\r\n\t\t\t\t\t[(words[j], words[word3]) for j in target_words if j != word3]\r\n\t\t\t\t)\r\n\t\t\t\tbreak\r\n\r\n\t\treturn concepts\r\n\r\n\r\n\r\n\"\"\" Dependency Types \"\"\"\r\n\r\n# nsubj : nominal subject : Nominal subject is a noun phrase which is the syntactic subject of a clause\r\ndef nsubject(w1, w2):\r\n\tpos = [w1.pos, w2.pos]\r\n\r\n\t# DT check\r\n\tif \"DT\" not in pos:\r\n\r\n\t\t# NN and JJ check\r\n\t\tif \"JJ\" in pos:\r\n\t\t\treturn (w1, w2)\r\n\r\n\t\tif \"NN\" in pos:\t\t\t\t\t\t\t\t\r\n\t\t\tif \"PRP\" in pos:\r\n\t\t\t\treturn (w1,)\r\n\t\t\telse:\r\n\t\t\t\treturn (w2, w1)\r\n\r\n\tif \"DT\" in pos:\r\n\t\treturn (w1,)\r\n\r\n\treturn None\t\t\t\t\r\n\t\r\n# det : determiner : Determiner is the relation between the head of an NP and its determiner\r\ndef det(w1, w2):\r\n\tpos = [w1.pos, w2.pos]\r\n\r\n\tif \"DT\" not in pos:\r\n\t\treturn (w2, w1)\r\n\tif \"DT\" in pos:\r\n\t\treturn 
(w1,)\r\n\r\n\treturn None\r\n\r\n# dep : dependent : Dependency is labeled as dep when the system is unable to determine a more precise dependency relation between two words\r\ndef dep(w1, w2):\r\n\tpos = [w1.pos, w2.pos]\r\n\r\n\tif (\"DT\" not in pos) and (\"JJ\" in pos):\r\n\t\treturn (w2, w1)\r\n\r\n\tif (\"DT\" not in pos) and (\"JJ\" not in pos):\r\n\t\tif (\"NN\" in pos) and (\"VB\" not in pos):\r\n\t\t\treturn (w1,)\r\n\t\telse:\r\n\t\t\treturn (w1, w2)\r\n\r\n\tif \"DT\" in pos:\r\n\t\treturn (w1,)\r\n\r\n\treturn None\r\n\r\n# dobj : direct object : Direct object of a VP is the noun phrase which is the (accusative) object of the verb\r\ndef dobj(w1, w2):\r\n\treturn (w1, w2)\r\n\r\n# acomp : adjectival complement : Adjectival complement of a verb is an adjectival phrase which functions as the complement\r\ndef acomp(w1, w2):\r\n\treturn (w1, w2)\r\n\t\r\n# advmod : adverbial modifier : Adverbial modifier of a word is a (non-clausal) adverb or adverbial phrase (ADVP) that serves to modify the meaning of the word\r\ndef advmod(w1, w2):\r\n\tpos = [w1.pos, w2.pos]\r\n\t#print pos\r\n\tif (\"VB\" in pos) and (\"JJ\" in pos):\r\n\t\treturn (w1, w2)\r\n\tif (\"VB\" in pos) and (\"JJ\" not in pos) and (\"IN\" in pos):\r\n\t\treturn (w1, w2)\r\n\tif (\"VB\" in pos) and (\"JJ\" not in pos) and (\"IN\" not in pos):\r\n\t\treturn (w2, w1)\r\n\tif \"VB\" not in pos:\r\n\t\treturn (w2, w1)\r\n\r\n\treturn None\r\n\r\n# amod : adjectival modifier : Adjectival modifier of an NP is any adjectival phrase that serves to modify the meaning of the NP\r\ndef amod(w1, w2):\r\n\tpos = [w1.pos, w2.pos]\r\n\r\n\tif \"VB\" in pos:\r\n\t\treturn (w1, w2)\r\n\telse:\r\n\t\treturn (w2, w1)\r\n\r\n# aux : auxiliary : Auxiliary of a clause is a non-main verb of the clause\t\r\ndef aux(w1, w2):\r\n\tpos = [w1.pos, w2.pos]\r\n\r\n\tif \"TO\" in pos:\r\n\t\treturn (w1,)\r\n\tif \"VB\" not in pos:\r\n\t\treturn (w2, w1)\r\n\r\n# nn : noun compound modifier : Noun compound modifier of an NP is any noun that serves to modify the head noun\r\ndef nn(w1, w2):\r\n\t# order words by index\r\n\tif w2.index < w1.index:\r\n\t\treturn (w2, w1)\r\n\telse:\r\n\t\treturn (w1, w2)\r\n\r\ndef neg(w1, w2):\r\n\tif w1 != w2:\r\n\t\treturn (w2, w1)\r\n\r\n# prep : prepositional modifier : Prepositional modifier of a verb, adjective, or noun is any prepositional phrase that serves to modify the meaning of the verb, adjective, noun, or even another preposition\r\ndef prep(w1, w2):\r\n\treturn (w1, w2)\r\n\r\n\r\nif __name__ == '__main__':\r\n\r\n\tconcept = ConceptParser()\r\n\r\n\tconcepts = concept.parse(\"The coffee was hot and tasty.\")\r\n\r\n\tfor c in concepts:\r\n\t\tprint(c)\r\n\t\r\n\t\r\n\t\r\n\t# print(concept.parse(\"Redevelopment of the Darlington #nuclearplant shows some of the work we're famous for in numerous #nuclear projects.\"))\r\n\r\n\t# concepts_per_sentence = concept.parse_all([\r\n\t\t# \"The coffee was hot and tasty.\",\r\n\t\t# \"I enjoyed the time i spent at this new restaurant!\"\r\n\t# ])\r\n\r\n\t# for concepts in concepts_per_sentence:\r\n\t\t# print(concepts)", "repo_name": "ndoll1998/KnowBert", "sub_path": "src/knowledge/senticnet/concept_parser.py", "file_name": "concept_parser.py", "file_ext": "py", "file_size_in_byte": 9011, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "85", "api": [{"api_name": "nltk.corpus.stopwords.words", "line_number": 35, "usage_type": "call"}, {"api_name": "nltk.corpus.stopwords", 
"line_number": 35, "usage_type": "name"}, {"api_name": "nltk.stem.WordNetLemmatizer", "line_number": 39, "usage_type": "call"}, {"api_name": "stanfordnlp.Pipeline", "line_number": 40, "usage_type": "call"}, {"api_name": "stanfordnlp.Pipeline", "line_number": 41, "usage_type": "call"}]}
+{"seq_id": "71355054999", "text": "from typing import Any, List, Dict\n\nfrom anytree import Node, RenderTree, ContRoundStyle\n\n\nclass RenderState:\n \" Render state class. View shablbot module active in tree style. \"\n def __init__(self, modules: Dict[str, Any], main_root: Node = None):\n self.modules = modules\n self.main_root = main_root\n\n self.node: Node = self.__create_node()\n\n def __create_node(self) -> Node:\n root = self.main_root\n\n for name_module, object in self.modules.items():\n if not root: subroot = Node(name_module)\n else: subroot = Node(name_module, parent=root)\n\n if isinstance(object, Dict):\n [Node(str(v), parent=subroot) for _, v in object.items()]\n elif isinstance(object, List):\n [Node(str(item), parent=subroot) for item in object]\n\n return root if root else subroot\n\n def render(self, style = ContRoundStyle) -> None:\n \"\"\" Render tree with state bot modules.\n\n Args:\n style ([type], optional): Style how need rendered items. Defaults to ContRoundStyle.\n \"\"\"\n print(RenderTree(self.node, style=style()).by_attr())\n\n\ndef render_state(name_module: str, module: Any) -> None:\n \"\"\" Render state module bot. Use tree.\n\n Args:\n name_module (str): Modules name\n module (Any): Object for check node\n \"\"\"\n render_state = RenderState({ name_module: module })\n render_state.render()\n\n\ndef render_state_all_components(list_components: List[Any]) -> None:\n \"\"\" Render state all modules bot in tree style.\n\n Args:\n list_components (List[Any]): components bot for need rebder state. All componnets have 'get_main_data_object()' function\"\n \"\"\"\n render_state = RenderState(\n modules={\n comp.__class__.__name__ : comp.get_main_data_object()\n for comp in list_components\n },\n main_root=Node(\"Shablbot\")\n )\n render_state.render()\n", "repo_name": "Blackgard/shablbot", "sub_path": "shablbot/core/utils.py", "file_name": "utils.py", "file_ext": "py", "file_size_in_byte": 1959, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 7, "dataset": "github-code", "pt": "85", "api": [{"api_name": "typing.Dict", "line_number": 8, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 8, "usage_type": "name"}, {"api_name": "anytree.Node", "line_number": 8, "usage_type": "name"}, {"api_name": "anytree.Node", "line_number": 12, "usage_type": "name"}, {"api_name": "anytree.Node", "line_number": 18, "usage_type": "call"}, {"api_name": "anytree.Node", "line_number": 19, "usage_type": "call"}, {"api_name": "typing.Dict", "line_number": 21, "usage_type": "argument"}, {"api_name": "anytree.Node", "line_number": 22, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 23, "usage_type": "argument"}, {"api_name": "anytree.Node", "line_number": 24, "usage_type": "call"}, {"api_name": "anytree.Node", "line_number": 14, "usage_type": "name"}, {"api_name": "anytree.ContRoundStyle", "line_number": 28, "usage_type": "name"}, {"api_name": "anytree.RenderTree", "line_number": 34, "usage_type": "call"}, {"api_name": "typing.Any", "line_number": 37, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 48, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 48, "usage_type": "name"}, {"api_name": "anytree.Node", "line_number": 59, "usage_type": "call"}]}
+{"seq_id": "40765488776", "text": "# notes... sometimes I need to configure a proxy to do external calls to ws. \n# for that: \n\n\nimport json\n# import urllib2\n# people = json.load(urllib2.urlopen(url_oData_people))\n\n\nimport requests\nurl_oData_people = \"http://services.odata.org/TripPinRESTierService/(S(pk4yy1pao5a2nngmm2ecx0hy))/People\"\n\n# response = requests.get( url_oData_people )\n# people = response.json()\n# print(people)\n\n\n# CONVERT JOSN into object -> Pandas or dictionary array.7\n\nmovie_json = \"\"\"\n{\n\"Title\":\"Johnny 5\",\n\"Year\":\"2001\",\n\"Runtime\":\"119 min\",\n\"Country\":\"USA\"\n}\n\"\"\"\n\nmovie_data = json.loads(movie_json)\nprint(type(movie_data), movie_data)\n \nprint(\"The title is {}\".format(movie_data.get('Title')))\nmovie_json_text_2 = json.dumps(movie_data)\nprint(type(movie_json_text_2), movie_json_text_2)\n", "repo_name": "davidvela/MyFirstPythonProject", "sub_path": "zPythonLearn/pn_readJSON.py", "file_name": "pn_readJSON.py", "file_ext": "py", "file_size_in_byte": 781, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "85", "api": [{"api_name": "json.loads", "line_number": 29, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 33, "usage_type": "call"}]}
+{"seq_id": "12738271185", "text": "from flask import Blueprint, render_template, request, send_file, send_from_directory\n\nbp = Blueprint('configure_css', __name__, url_prefix='/configure')\n\n@bp.route('/', methods=['GET', 'POST'])\ndef configure():\n \n args = request.args\n\n file_path = args['file']\n\n print('FILE PATH:', file_path)\n\n from aws_s3.create_presigned_url import create_presigned_url\n image_url = create_presigned_url(file_path)\n\n return render_template('file_upload/optimise_for_web_background.html', file_path=file_path, image_url=image_url)\n", "repo_name": "vchapandrews/Seamlessly", "sub_path": "app/blueprints/configure_css_background.py", "file_name": "configure_css_background.py", "file_ext": "py", "file_size_in_byte": 539, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "85", "api": [{"api_name": "flask.Blueprint", "line_number": 3, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 8, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 8, "usage_type": "name"}, {"api_name": "aws_s3.create_presigned_url.create_presigned_url", "line_number": 15, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 17, "usage_type": "call"}]}
+{"seq_id": "388623755", "text": "# Import the dependencies\nimport sqlalchemy\nfrom flask import Flask, jsonify, send_file\nfrom flask_cors import CORS\n\n# Python SQL toolkit and Object Relational Mapper\nfrom sqlalchemy.ext.automap import automap_base\nfrom sqlalchemy.orm import Session\nfrom sqlalchemy import create_engine, inspect\nimport pandas as pd\n\n#######################################################\n# Database Setup\n#######################################################\n\nengine = create_engine(\"sqlite:///../data/fastfood_obesity.sqlite\")\n\n# reflect an existing database into a new model\nBase = automap_base()\n\n# reflect the tables\nBase.prepare(engine, reflect=True)\n\n# Save references to each table\nobesity = Base.classes.obesity\nfastfood = Base.classes.fastfood\n\n# Get column names for metadata\ninspector = inspect(engine)\n\n#######################################################\n# Flask Setup\n#######################################################\n\napp = Flask(__name__)\nCORS(app)\n\n#######################################################\n# Flask Routes\n#######################################################\n\n@app.route(\"/\")\ndef welcome():\n \"\"\"List all available api routes.\"\"\"\n return (\n f\"Available Routes:
\"\n f\"/api/v1.0/metadata
\"\n f\"/api/v1.0/aff_ob
\"\n f\"/api/v1.0/geojson\"\n )\n\n@app.route(\"/api/v1.0/metadata\")\ndef get_metadata():\n# Create our session (link) from Python to the DB\n session = Session(engine)\n conn = engine.connect()\n #query \n Fastfood = pd.read_sql(\"SELECT * FROM Fastfood\", conn)\n Obesity = pd.read_sql(\"SELECT * FROM Obesity\", conn)\n joined_table = pd.read_sql('Select fastfood.State, fastfood.\"All fast food restaurants\",fastfood.\"Full-service restaurants\", fastfood.Subway, fastfood.Starbucks, fastfood.McDonalds, fastfood.\"Dunkin Donut\", fastfood.\"Burger King\", fastfood.\"Taco Bell\", fastfood.Dominos, fastfood.Wendys, fastfood.\"Dairy Queen\", obesity.Prevalence, obesity.\"95% CI\" from Fastfood INNER JOIN Obesity ON (Fastfood.State = Obesity.State)', conn)\n js = joined_table.to_json(orient='records')\n session.close()\n\n return js\n\n@app.route(\"/api/v1.0/aff_ob\")\ndef get_data():\n session = Session(engine)\n data = session.query(\n fastfood.State, \n obesity.Prevalence, \n getattr(fastfood, \"All fast food restaurants\")\n ).filter(\n fastfood.State == obesity.State\n ).order_by(\n getattr(fastfood, \"All fast food restaurants\")\n ).all()\n session.close()\n\n # Convert data to a list of dictionaries\n data_dicts = [dict(zip([\"State\", \"Prevalence\", \"All fast food restaurants\"], row)) for row in data]\n\n # Convert to JSON\n return jsonify(data_dicts)\n\n@app.route(\"/api/v1.0/geojson\")\ndef get_geojson():\n return send_file(\"../data/us-states-obesity.geojson\", mimetype='application/geo+json')\n\nif __name__ == \"__main__\":\n app.run(debug=True)", "repo_name": "JamesBeckmeyer/Fast-Food-Establishments-per-Capita-Compared-to-State-Obesity-Rates", "sub_path": "Database and ETL/code/obesity_app.py", "file_name": "obesity_app.py", "file_ext": "py", "file_size_in_byte": 2883, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "85", "api": [{"api_name": "sqlalchemy.create_engine", "line_number": 16, "usage_type": "call"}, {"api_name": "sqlalchemy.ext.automap.automap_base", "line_number": 19, "usage_type": "call"}, {"api_name": "sqlalchemy.inspect", "line_number": 29, "usage_type": "call"}, {"api_name": "flask.Flask", "line_number": 35, "usage_type": "call"}, {"api_name": "flask_cors.CORS", "line_number": 36, "usage_type": "call"}, {"api_name": "sqlalchemy.orm.Session", "line_number": 55, "usage_type": "call"}, {"api_name": "pandas.read_sql", "line_number": 58, "usage_type": "call"}, {"api_name": "pandas.read_sql", "line_number": 59, "usage_type": "call"}, {"api_name": "pandas.read_sql", "line_number": 60, "usage_type": "call"}, {"api_name": "sqlalchemy.orm.Session", "line_number": 68, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 84, "usage_type": "call"}, {"api_name": "flask.send_file", "line_number": 88, "usage_type": "call"}]}
+{"seq_id": "25587801024", "text": "try:\n from aerisapisdk import aerframesdk\n from aerisapisdk import aerisconfig\n from aerisapisdk.exceptions import ApiException\n print('Using the aerisapisdk installed from pip')\nexcept ModuleNotFoundError:\n print('Using the currently-checked-out aerisapisdk')\n import os\n import inspect\n import sys\n current_dir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))\n parent_dir = os.path.dirname(current_dir)\n sys.path.insert(0, parent_dir)\n from aerisapisdk import aerframesdk\n from aerisapisdk import aerisconfig\n from aerisapisdk.exceptions import ApiException\n\nimport argparse\nimport json\nimport logging\nimport sched\nimport time\n\n# only query for location once an hour\nLOCATION_REQUEST_PERIOD_SECONDS = 60*60\nlogger = None\n\n\ndef begin_loop(account_id, api_key, device_id, device_id_type):\n scheduler = sched.scheduler(time.time, time.sleep)\n scheduler.enter(1, 1, get_location_and_make_noise,\n (account_id, api_key, device_id, device_id_type, scheduler, None))\n scheduler.run()\n\n\ndef get_location_and_make_noise(account_id, api_key, device_id, device_id_type, scheduler, original_location):\n try:\n new_location = aerframesdk.get_location(account_id, api_key, device_id_type, device_id)\n logger.debug(f'Latest location = {new_location}')\n # if there actually is a current location (instead of it being unknown...)\n if is_location_present(new_location):\n # set the original location to the current location\n if original_location is None:\n original_location = new_location\n logger.info(f'The original \"stay-put\" location of the device is {original_location}')\n\n # check to see if the location changed\n if location_changed(new_location, original_location):\n logger.warn(f'The device moved!')\n\n except ApiException as e:\n logger.error(f'There was a problem calling the API', exc_info=e)\n except BaseException as e:\n logger.error(f'Something else went horribly wrong', exc_info=e)\n\n # run this function again after some delay\n scheduler.enter(LOCATION_REQUEST_PERIOD_SECONDS, 1, get_location_and_make_noise,\n (account_id, api_key, device_id, device_id_type, scheduler, original_location))\n\n\ndef is_location_present(loc):\n \"\"\"\n Checks to see if a location result has actual data, or if it is the \"no location available\" response.\n\n Parameters\n ----------\n loc: dict\n\n Returns\n -------\n True if there is actually some location data in there.\n \"\"\"\n if loc['mcc'] == 0:\n return False\n\n return True\n\n\ndef location_changed(new_loc, prev_loc):\n \"\"\"\n Examines device locations to determine if a device has moved.\n Parameters\n ----------\n new_loc: dict\n prev_loc: dict\n\n Returns\n -------\n bool\n True if the device has moved.\n \"\"\"\n result = False\n if prev_loc is None:\n return False\n for attribute in ('mcc', 'mnc', 'lac', 'cellId'):\n if new_loc[attribute] != prev_loc[attribute]:\n logger.warn(f'Device has moved from {attribute} {prev_loc[attribute]} to {new_loc[attribute]}')\n result = True\n return result\n\n\ndef configure_logging(level):\n global logger\n\n date_format_string = '%Y-%m-%dT%H:%M:%S%z'\n formatter = logging.Formatter(fmt='%(asctime)s %(levelname)s: %(message)s', datefmt=date_format_string)\n formatter.converter = time.gmtime\n\n ch = logging.StreamHandler()\n ch.setLevel(logging.INFO)\n ch.setFormatter(formatter)\n\n # root logger is good enough\n logger = logging.getLogger('aerframe_budget_geofence')\n logger.setLevel(level)\n logger.addHandler(ch)\n\n\nif __name__ 
== '__main__':\n argparser = argparse.ArgumentParser()\n argparser.add_argument('--config-file', required=True,\n help='path to a configuration file to use, like the one generated by aeriscli config')\n argparser.add_argument('--imsi', required=True, help='the IMSI you want to try to geofence')\n\n args = argparser.parse_args()\n\n # point aerisconfig at our configuration file\n aerisconfig.load_config(args.config_file)\n # load api key and account ID from the same configuration file\n with open(args.config_file, 'r') as f:\n config_dict = json.load(f)\n api_key = config_dict['apiKey']\n account_id = config_dict['accountId']\n del config_dict\n\n device_id = args.imsi\n device_id_type = 'IMSI'\n\n configure_logging(logging.INFO)\n logger.info('Starting...')\n\n # start the loop\n begin_loop(account_id, api_key, device_id, device_id_type)\n", "repo_name": "aeristhings/aeris-apisdk-py", "sub_path": "sample/aerframe_budget_geofence.py", "file_name": "aerframe_budget_geofence.py", "file_ext": "py", "file_size_in_byte": 4688, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "73", "api": [{"api_name": "os.path.dirname", "line_number": 11, "usage_type": "call"}, {"api_name": "os.path", "line_number": 11, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 11, "usage_type": "call"}, {"api_name": "inspect.getfile", "line_number": 11, "usage_type": "call"}, {"api_name": "inspect.currentframe", "line_number": 11, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 12, "usage_type": "call"}, {"api_name": "os.path", "line_number": 12, "usage_type": "attribute"}, {"api_name": "sys.path.insert", "line_number": 13, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 13, "usage_type": "attribute"}, {"api_name": "sched.scheduler", "line_number": 30, "usage_type": "call"}, {"api_name": "time.time", "line_number": 30, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 30, "usage_type": "attribute"}, {"api_name": "aerisapisdk.aerframesdk.get_location", "line_number": 38, "usage_type": "call"}, {"api_name": "aerisapisdk.aerframesdk", "line_number": 38, "usage_type": "name"}, {"api_name": "aerisapisdk.exceptions.ApiException", "line_number": 51, "usage_type": "name"}, {"api_name": "logging.Formatter", "line_number": 106, "usage_type": "call"}, {"api_name": "time.gmtime", "line_number": 107, "usage_type": "attribute"}, {"api_name": "logging.StreamHandler", "line_number": 109, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 110, "usage_type": "attribute"}, {"api_name": "logging.getLogger", "line_number": 114, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 120, "usage_type": "call"}, {"api_name": "aerisapisdk.aerisconfig.load_config", "line_number": 128, "usage_type": "call"}, {"api_name": "aerisapisdk.aerisconfig", "line_number": 128, "usage_type": "name"}, {"api_name": "json.load", "line_number": 131, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 139, "usage_type": "attribute"}]}
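The polling loop in that record is the standard self-rescheduling `sched` idiom; a compressed, runnable sketch (the one-second period and three iterations are made up for the demo):

```python
import sched
import time

scheduler = sched.scheduler(time.time, time.sleep)

def poll(remaining):
    print('polling at', time.strftime('%H:%M:%S'))
    if remaining > 1:
        # Re-enter ourselves, exactly as get_location_and_make_noise does.
        scheduler.enter(1, 1, poll, (remaining - 1,))

scheduler.enter(1, 1, poll, (3,))
scheduler.run()  # blocks until the event queue drains
```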
+{"seq_id": "70410228716", "text": "#coding:utf-8\nfrom django.contrib import admin\n\nfrom .models import (EquipmentType,\n Equipment,\n PropertyType,\n Property,\n EA,\n Reserve,\n Contract,\n Period,\n Prices)\nfrom .helpers import get_cache_props, get_cache_type\nfrom .mixins import PropertyMixin\n\nfrom collections import Counter\nimport hashlib\n\n\n@admin.register(EquipmentType)\nclass EquipmentTypeAdmin(admin.ModelAdmin):\n pass\n\n\nclass PropertyInline(admin.TabularInline):\n model = Property\n extra = 1\n\n\nclass PricesInline(admin.TabularInline):\n model = Prices\n extra = 1\n\n\n@admin.register(Equipment)\nclass EquipmentAdmin(PropertyMixin, admin.ModelAdmin):\n list_display = ('type', 'article', 'count', '_property')\n inlines = [PropertyInline, PricesInline]\n\n def response_add(self, request, new_object, post_url_continue=None):\n obj = self.after_saving_model_and_related_inlines(new_object)\n return super(EquipmentAdmin, self).response_add(request, obj, post_url_continue)\n\n def response_change(self, request, obj):\n obj = self.after_saving_model_and_related_inlines(obj)\n return super(EquipmentAdmin, self).response_change(request, obj)\n\n def after_saving_model_and_related_inlines(self, obj):\n # вычисляю хеш (тип + основные свойства)\n props = obj.property_set.filter(general=True)\n hash_base = u'_'.join([\n obj.type.name,\n u'_'.join([u'{}_{}'.format(p.type.name, p.value) for p in props])])\n obj.hash = hashlib.md5(hash_base.encode('utf-8')).hexdigest()\n obj.save()\n\n # нормализуем количество\n live_hash = Equipment.objects.values_list('hash', flat=True).distinct()\n for item_hash in live_hash:\n eq_sum = sum(Equipment.objects.filter(hash=item_hash).values_list('count', flat=True))\n try:\n ea = EA.objects.get(hash=item_hash)\n diff = eq_sum - (ea.count_in + ea.count_out)\n if diff:\n ea.count_in += diff\n ea.save()\n except EA.DoesNotExist:\n EA.objects.create(type=obj.type, count_in=obj.count, hash=item_hash)\n\n # удаляем те, у которых невалидный хеш\n # случай, ко��да у товара меняется хеш на новый\n # и в он был единственным представителем старого хеша\n EA.objects.exclude(hash__in=live_hash).delete()\n\n return obj\n\n\n@admin.register(PropertyType)\nclass PropertyTypeAdmin(admin.ModelAdmin):\n pass\n\n\n@admin.register(Property)\nclass PropertyAdmin(admin.ModelAdmin):\n pass\n\n\n@admin.register(EA)\nclass EAAdmin(PropertyMixin, admin.ModelAdmin):\n list_display = ('type', 'count_in', 'count_out', '_property')\n\n\n@admin.register(Reserve)\nclass ReserveAdmin(admin.ModelAdmin):\n list_display = ('user', 'status', '_inventory', '_reserve')\n\n def _inventory(self, obj):\n \"\"\"\n return: (4)Ботинки => Размер: 40 , (1)Сноуборд => Длина: 160, ...\n \"\"\"\n eqs = [u'{} => {}'.format(get_cache_type(item.hash),\n get_cache_props(item.hash))\n for item in obj.equipments.all()]\n return u' , '.join([u'({}){}'.format(v, k)\n for k, v in Counter(eqs).items()])\n _inventory.short_description = u'Инвентарь на руках'\n\n def _reserve(self, obj):\n \"\"\"\n return: (4)Ботинки => Размер: 40 , (1)Сноуборд => Длина: 160, ...\n \"\"\"\n return u' , '.join([u'({}){} => {}'.format(item.count,\n get_cache_type(item.ea.hash),\n get_cache_props(item.ea.hash))\n for item in obj.reserveea_set\n .select_related('ea__hash')\n .order_by('-count')])\n _reserve.short_description = u'Забронировано'\n\n\n@admin.register(Period)\nclass PeriodAdmin(admin.ModelAdmin):\n pass\n\n\n@admin.register(Contract)\nclass ContractAdmin(admin.ModelAdmin):\n list_display = ('reserve', 'period', 
'total', 'deposit', 'zip', 'active')\n", "repo_name": "cephey/rent", "sub_path": "src/inventory/admin.py", "file_name": "admin.py", "file_ext": "py", "file_size_in_byte": 4492, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "73", "api": [{"api_name": "django.contrib.admin.ModelAdmin", "line_number": 21, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 21, "usage_type": "name"}, {"api_name": "django.contrib.admin.register", "line_number": 20, "usage_type": "call"}, {"api_name": "models.EquipmentType", "line_number": 20, "usage_type": "argument"}, {"api_name": "django.contrib.admin", "line_number": 20, "usage_type": "name"}, {"api_name": "django.contrib.admin.TabularInline", "line_number": 25, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 25, "usage_type": "name"}, {"api_name": "models.Property", "line_number": 26, "usage_type": "name"}, {"api_name": "django.contrib.admin.TabularInline", "line_number": 30, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 30, "usage_type": "name"}, {"api_name": "models.Prices", "line_number": 31, "usage_type": "name"}, {"api_name": "mixins.PropertyMixin", "line_number": 36, "usage_type": "name"}, {"api_name": "django.contrib.admin.ModelAdmin", "line_number": 36, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 36, "usage_type": "name"}, {"api_name": "hashlib.md5", "line_number": 54, "usage_type": "call"}, {"api_name": "models.Equipment.objects.values_list", "line_number": 58, "usage_type": "call"}, {"api_name": "models.Equipment.objects", "line_number": 58, "usage_type": "attribute"}, {"api_name": "models.Equipment", "line_number": 58, "usage_type": "name"}, {"api_name": "models.Equipment.objects.filter", "line_number": 60, "usage_type": "call"}, {"api_name": "models.Equipment.objects", "line_number": 60, "usage_type": "attribute"}, {"api_name": "models.Equipment", "line_number": 60, "usage_type": "name"}, {"api_name": "models.EA.objects.get", "line_number": 62, "usage_type": "call"}, {"api_name": "models.EA.objects", "line_number": 62, "usage_type": "attribute"}, {"api_name": "models.EA", "line_number": 62, "usage_type": "name"}, {"api_name": "models.EA.DoesNotExist", "line_number": 67, "usage_type": "attribute"}, {"api_name": "models.EA", "line_number": 67, "usage_type": "name"}, {"api_name": "models.EA.objects.create", "line_number": 68, "usage_type": "call"}, {"api_name": "models.EA.objects", "line_number": 68, "usage_type": "attribute"}, {"api_name": "models.EA", "line_number": 68, "usage_type": "name"}, {"api_name": "models.EA.objects.exclude", "line_number": 73, "usage_type": "call"}, {"api_name": "models.EA.objects", "line_number": 73, "usage_type": "attribute"}, {"api_name": "models.EA", "line_number": 73, "usage_type": "name"}, {"api_name": "django.contrib.admin.register", "line_number": 35, "usage_type": "call"}, {"api_name": "models.Equipment", "line_number": 35, "usage_type": "argument"}, {"api_name": "django.contrib.admin", "line_number": 35, "usage_type": "name"}, {"api_name": "django.contrib.admin.ModelAdmin", "line_number": 79, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 79, "usage_type": "name"}, {"api_name": "django.contrib.admin.register", "line_number": 78, "usage_type": "call"}, {"api_name": "models.PropertyType", "line_number": 78, "usage_type": "argument"}, {"api_name": "django.contrib.admin", "line_number": 78, 
"usage_type": "name"}, {"api_name": "django.contrib.admin.ModelAdmin", "line_number": 84, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 84, "usage_type": "name"}, {"api_name": "django.contrib.admin.register", "line_number": 83, "usage_type": "call"}, {"api_name": "models.Property", "line_number": 83, "usage_type": "argument"}, {"api_name": "django.contrib.admin", "line_number": 83, "usage_type": "name"}, {"api_name": "mixins.PropertyMixin", "line_number": 89, "usage_type": "name"}, {"api_name": "django.contrib.admin.ModelAdmin", "line_number": 89, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 89, "usage_type": "name"}, {"api_name": "django.contrib.admin.register", "line_number": 88, "usage_type": "call"}, {"api_name": "models.EA", "line_number": 88, "usage_type": "argument"}, {"api_name": "django.contrib.admin", "line_number": 88, "usage_type": "name"}, {"api_name": "django.contrib.admin.ModelAdmin", "line_number": 94, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 94, "usage_type": "name"}, {"api_name": "helpers.get_cache_type", "line_number": 101, "usage_type": "call"}, {"api_name": "helpers.get_cache_props", "line_number": 102, "usage_type": "call"}, {"api_name": "collections.Counter", "line_number": 105, "usage_type": "call"}, {"api_name": "helpers.get_cache_type", "line_number": 113, "usage_type": "call"}, {"api_name": "helpers.get_cache_props", "line_number": 114, "usage_type": "call"}, {"api_name": "django.contrib.admin.register", "line_number": 93, "usage_type": "call"}, {"api_name": "models.Reserve", "line_number": 93, "usage_type": "argument"}, {"api_name": "django.contrib.admin", "line_number": 93, "usage_type": "name"}, {"api_name": "django.contrib.admin.ModelAdmin", "line_number": 122, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 122, "usage_type": "name"}, {"api_name": "django.contrib.admin.register", "line_number": 121, "usage_type": "call"}, {"api_name": "models.Period", "line_number": 121, "usage_type": "argument"}, {"api_name": "django.contrib.admin", "line_number": 121, "usage_type": "name"}, {"api_name": "django.contrib.admin.ModelAdmin", "line_number": 127, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 127, "usage_type": "name"}, {"api_name": "django.contrib.admin.register", "line_number": 126, "usage_type": "call"}, {"api_name": "models.Contract", "line_number": 126, "usage_type": "argument"}, {"api_name": "django.contrib.admin", "line_number": 126, "usage_type": "name"}]}
+{"seq_id": "35796351307", "text": "# set connection string\n# get the connection string from sas azure portal\n\n# export AZURE_STORAGE_CONNECTION_STRING=\"\"\nimport os\n\nfrom azure.storage.blob import BlobServiceClient, BlobClient, ContainerClient\n\n# Retrieve the connection string for use with the application. The storage\n# connection string is stored in an environment variable on the machine\n# running the application called AZURE_STORAGE_CONNECTION_STRING. If the environment variable is\n# created after the application is launched in a console or with Visual Studio,\n# the shell or application needs to be closed and reloaded to take the\n# environment variable into account.\nconnect_str = os.getenv('AZURE_STORAGE_CONNECTION_STRING')\nairflow_home = os.environ.get(\"AIRFLOW_HOME\")\n\nprint('connection started.')\n\nblob_service_client = BlobServiceClient.from_connection_string(connect_str)\n\nupload_file_path = f\"{airflow_home}/data/yellow_tripdata_2019-01.parquet\"\n\n# Create a blob client using the local file name as the name for the blob\nblob_client = blob_service_client.get_blob_client(\n container='datastores', blob=\"yellow_tripdata_2019-01.parquet\")\n\nprint('blob created.')\n\n\nwith open(upload_file_path, \"rb\") as data:\n blob_client.upload_blob(data)\n\nprint('complteed.')\n", "repo_name": "anil-chhetri/airflow-practise", "sub_path": "test.py", "file_name": "test.py", "file_ext": "py", "file_size_in_byte": 1268, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "85", "api": [{"api_name": "os.getenv", "line_number": 15, "usage_type": "call"}, {"api_name": "os.environ.get", "line_number": 16, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 16, "usage_type": "attribute"}, {"api_name": "azure.storage.blob.BlobServiceClient.from_connection_string", "line_number": 20, "usage_type": "call"}, {"api_name": "azure.storage.blob.BlobServiceClient", "line_number": 20, "usage_type": "name"}]}
+{"seq_id": "30984323294", "text": "#####################################################################\n# This file is part of the 4D Light Field Benchmark. #\n# #\n# This work is licensed under the Creative Commons #\n# Attribution-NonCommercial-ShareAlike 4.0 International License. #\n# To view a copy of this license, #\n# visit http://creativecommons.org/licenses/by-nc-sa/4.0/. #\n#####################################################################\n\nimport os\nimport sys\nimport torch\nimport numpy as np\n\n\ndef write_pfm(data, fpath, scale=1, file_identifier=b\"Pf\", dtype=\"float32\"):\n # PFM format definition: http://netpbm.sourceforge.net/doc/pfm.html\n\n data = np.flipud(data)\n height, width = np.shape(data)[:2]\n values = np.ndarray.flatten(np.asarray(data, dtype=dtype))\n endianess = data.dtype.byteorder\n # print(endianess)\n\n if endianess == '<' or (endianess == '=' and sys.byteorder == 'little'):\n scale *= -1\n\n with open(fpath, 'wb') as file:\n # print(file_identifier + b'\\n')\n file.write(file_identifier + b'\\n')\n file.write(b'%d %d\\n' % (width, height))\n file.write(b'%d\\n' % scale)\n file.write(values)\n\n\ndef read_pfm(fpath, expected_identifier=b\"Pf\"):\n # PFM format definition: http://netpbm.sourceforge.net/doc/pfm.html\n\n with open(fpath, 'rb') as f:\n # header\n identifier = _get_next_line(f)\n if identifier != expected_identifier:\n raise Exception('Unknown identifier. Expected: \"%s\", got: \"%s\".' % (\n expected_identifier, identifier))\n\n try:\n line_dimensions = _get_next_line(f).decode('ascii')\n # print(line_dimensions)\n dimensions = line_dimensions.split(' ')\n width = int(dimensions[0].strip())\n height = int(dimensions[1].strip())\n except:\n raise Exception('Could not parse dimensions: \"%s\". '\n 'Expected \"width height\", e.g. \"512 512\".' % line_dimensions)\n\n try:\n line_scale = _get_next_line(f)\n scale = float(line_scale)\n assert scale != 0\n if scale < 0:\n endianness = \"<\"\n else:\n endianness = \">\"\n except:\n raise Exception('Could not parse max value / endianess information: \"%s\". '\n 'Should be a non-zero number.' % line_scale)\n\n try:\n data = np.fromfile(f, \"%sf\" % endianness)\n data = np.reshape(data, (height, width))\n data = np.flipud(data)\n with np.errstate(invalid=\"ignore\"):\n data *= abs(scale)\n except:\n raise Exception(\n 'Invalid binary values. Could not create %dx%d array from input.' 
% (height, width))\n\n return data\n\n\ndef _get_next_line(f):\n next_line = f.readline().rstrip()\n # ignore comments\n while next_line.startswith(b'#'):\n next_line = f.readline().rstrip()\n return next_line\n\n\nif __name__ == '__main__':\n data = torch.rand(256, 256).mul(10000.0)\n data_np = data.numpy()\n write_pfm(data_np, 'temp.pfm')\n data_est_np = torch.from_numpy(read_pfm('temp.pfm').copy()).float()\n print(torch.min(torch.abs(data_est_np - data)))\n print(torch.max(torch.abs(data_est_np - data)))\n print(torch.mean(torch.abs(data_est_np - data)))\n print(data_est_np.min(), data.min())\n print(data_est_np.max(), data.max())\n", "repo_name": "tedyhabtegebrial/monocular_view_synthesis", "sub_path": "monocular/src/datasets/carla/pfm_rw.py", "file_name": "pfm_rw.py", "file_ext": "py", "file_size_in_byte": 3561, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "85", "api": [{"api_name": "numpy.flipud", "line_number": 19, "usage_type": "call"}, {"api_name": "numpy.shape", "line_number": 20, "usage_type": "call"}, {"api_name": "numpy.ndarray.flatten", "line_number": 21, "usage_type": "call"}, {"api_name": "numpy.ndarray", "line_number": 21, "usage_type": "attribute"}, {"api_name": "numpy.asarray", "line_number": 21, "usage_type": "call"}, {"api_name": "sys.byteorder", "line_number": 25, "usage_type": "attribute"}, {"api_name": "numpy.fromfile", "line_number": 69, "usage_type": "call"}, {"api_name": "numpy.reshape", "line_number": 70, "usage_type": "call"}, {"api_name": "numpy.flipud", "line_number": 71, "usage_type": "call"}, {"api_name": "numpy.errstate", "line_number": 72, "usage_type": "call"}, {"api_name": "torch.rand", "line_number": 90, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 93, "usage_type": "call"}, {"api_name": "torch.min", "line_number": 94, "usage_type": "call"}, {"api_name": "torch.abs", "line_number": 94, "usage_type": "call"}, {"api_name": "torch.max", "line_number": 95, "usage_type": "call"}, {"api_name": "torch.abs", "line_number": 95, "usage_type": "call"}, {"api_name": "torch.mean", "line_number": 96, "usage_type": "call"}, {"api_name": "torch.abs", "line_number": 96, "usage_type": "call"}]}
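A PFM file is a three-line ASCII header (`Pf`, `width height`, a signed scale whose sign encodes endianness) followed by raw floats; assuming the record's `pfm_rw.py` is importable, a round trip shows the payload survives exactly:

```python
import numpy as np
from pfm_rw import write_pfm, read_pfm  # assumes the module above is on sys.path

depth = np.random.rand(4, 4).astype(np.float32)
write_pfm(depth, 'depth.pfm')

restored = read_pfm('depth.pfm')
assert np.allclose(depth, restored)  # float32 values survive the round trip
```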
+{"seq_id": "71164501037", "text": "import numpy as np\nimport tensorflow as tf\nimport gym\nfrom utils import *\nimport os\nimport time\nfrom Agents.TRPOAgentDiscrete import TRPO as TRPOD\nfrom Agents.TRPOAgent import TRPO\nimport argparse\nimport logging\nimport json\nfrom mpi4py import MPI\nimport sys\n\ncomm = MPI.COMM_WORLD\nrank = comm.Get_rank()\ncomm_size = comm.Get_size()\n# cpu = MPI.Get_processor_name()\n# print(\"Hello world from processor {}, process {} out of {}\".format(cpu,rank,comm_size))\n# sys.stdout.flush()\n\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\nRESULTS_DIR = os.path.join(os.getcwd(), 'Results')\nif not os.path.exists(RESULTS_DIR):\n os.mkdir(RESULTS_DIR)\n\nparser = argparse.ArgumentParser(description='TRPO.')\nparser.add_argument(\"--task\", type=str, default='SpaceInvaders-ram-v0')\nparser.add_argument(\"--timesteps_per_batch\", type=int, default=40000)\nparser.add_argument(\"--n_steps\", type=int, default=1000000000)\nparser.add_argument(\"--n_iter\", type=int, default=100)\nparser.add_argument(\"--gamma\", type=float, default=.995)\nparser.add_argument(\"--max_kl\", type=float, default=.01)\nparser.add_argument(\"--cg_damping\", type=float, default=0.1)\nparser.add_argument(\"--monitor\", type=bool, default=False)\nparser.add_argument(\"--parallel_balancing\", type=str, default=\"timesteps\") # timesteps, episodes\nparser.add_argument(\"--discrete\", type=bool, default=True)\n\n# change these parameters for hyperparameter adaptation (kvfrans)\nparser.add_argument(\"--decay_method\", type=str, default=\"none\") # adaptive, none\nparser.add_argument(\"--timestep_adapt\", type=int, default=0)\nparser.add_argument(\"--kl_adapt\", type=float, default=0)\n\nargs = parser.parse_args()\nargs.max_pathlength = gym.spec(args.task).timestep_limit\nif rank == 0:\n print(args)\n sys.stdout.flush()\n\n# initialize TRPO learner on all processes, distribute the starting weights\nlearner_env = gym.make(args.task)\n\nif args.discrete:\n learner = TRPOD(args, learner_env)\nelse:\n learner = TRPO(args, learner_env)\nif rank == 0:\n # statbar = tf.contrib.keras.utils.Progbar(args.n_iter )\n new_policy_weights = learner.get_starting_weights()\nelse:\n new_policy_weights = None\n\nstart_time = time.time()\nhistory = {}\nhistory[\"rollout_time\"] = []\nhistory[\"learn_time\"] = []\nhistory[\"bcast_time\"] = []\nhistory[\"gather_time\"] = []\nhistory[\"iteration_time\"] = []\nhistory[\"mean_reward\"] = []\nhistory[\"timesteps\"] = []\nhistory[\"maxkl\"] = []\nhistory[\"episodes\"] = []\n\n# start it off with a big negative number\nlast_reward = -1000000\nrecent_total_reward = 0\n\ntotalsteps = 0\n\nstarting_timesteps = args.timesteps_per_batch\nstarting_kl = args.max_kl\n\niteration = 0\nisDone = 0\n\nlogging.getLogger().setLevel(logging.WARNING)\n\nwhile isDone == 0:\n iteration += 1\n\n # synchronize model and update actor weights locally\n bcast_start = time.time()\n new_policy_weights = comm.bcast(new_policy_weights, root=0)\n learner.set_policy_weights(new_policy_weights)\n bcast_time = (time.time() - bcast_start)\n\n # start worker processes collect experience for a minimum args.timesteps_per_batch timesteps\n rollout_start = time.time()\n data_paths, data_rewards = learner.rollout(args.timesteps_per_batch / comm_size)\n rollout_time = (time.time() - rollout_start)\n\n # gathering of experience on root process\n gather_start = time.time()\n paths, episodes_rewards = gather_paths(data_paths, data_rewards, comm, rank, args.parallel_balancing)\n gather_time = (time.time() - 
gather_start)\n\n # only master process does learning on TF graph\n if rank == 0:\n learn_start = time.time()\n if args.decay_method != \"none\":\n learner.adjust_kl(args.max_kl)\n new_policy_weights, stats = learner.learn(paths, episodes_rewards)\n learn_time = (time.time() - learn_start)\n iteration_time = rollout_time + learn_time + gather_time + bcast_time\n\n print((\"\\n-------- Iteration %d ----------\" % iteration))\n print((\"Reward Statistics:\"))\n for k, v in stats.items():\n print(\"\\t{} = {:.3f}\".format(k,v))\n print((\"Timing Statistics:\"))\n print((\"\\tBroadcast time = %.3f s\" % bcast_time))\n print((\"\\tRollout time = %.3f s\" % rollout_time))\n print((\"\\tGather time = %.3f s\" % gather_time))\n print((\"\\tLearn time = %.3f s\" % learn_time))\n print((\"\\tTotal iteration time = %.3f s\" % (rollout_time + learn_time + gather_time + bcast_time)))\n\n history[\"rollout_time\"].append(rollout_time)\n history[\"learn_time\"].append(learn_time)\n history[\"bcast_time\"].append(bcast_time)\n history[\"gather_time\"].append(gather_time)\n history[\"iteration_time\"].append(rollout_time + learn_time + gather_time + bcast_time)\n history[\"mean_reward\"].append(stats[\"Avg_Reward\"])\n history[\"timesteps\"].append(args.timesteps_per_batch)\n history[\"maxkl\"].append(args.max_kl)\n history[\"episodes\"].append(stats['Episodes'])\n\n # compute 100 episode average reward\n ep = 0\n it = iteration-1\n rew = 0\n while ep < 100 and it >= 0:\n ep += history['episodes'][it]\n rew += history['mean_reward'][it]*history['episodes'][it]\n it -= 1\n if ep == 0:\n print(\"*** Problem: no complete episodes collected, increase timesteps_per_batch!\")\n isDone = 1\n else:\n print((\"Cumulative Reward Statistics:\"))\n print((\"\\tMaximum Avg_reward = %.3f from iteration %d\" % (np.max(history[\"mean_reward\"]), 1+np.argmax(history[\"mean_reward\"]))))\n print((\"\\tLast %d Episode Avg_reward = %.3f\" % (ep, (rew / ep))))\n\n print((\"Cumulative Mean Timing Statistics:\"))\n print((\"\\tBroadcast time = %.3f s\" % np.mean(history[\"bcast_time\"])))\n print((\"\\tRollout time = %.3f s\" % np.mean(history[\"rollout_time\"])))\n print((\"\\tGather time = %.3f s\" % np.mean(history[\"gather_time\"])))\n print((\"\\tLearn time = %.3f s\" % np.mean(history[\"learn_time\"])))\n print((\"\\tTotal iteration time = %.3f s\" % np.mean(history[\"iteration_time\"])))\n\n # hyperparameter adaptation (kvfrans)\n recent_total_reward += stats[\"Avg_Reward\"]\n if args.decay_method == \"adaptive\":\n if iteration % 10 == 0:\n if recent_total_reward < last_reward:\n print(\"Policy is not improving. Decrease KL and increase steps.\")\n if args.timesteps_per_batch < 20000:\n args.timesteps_per_batch += args.timestep_adapt\n if args.max_kl > 0.001:\n args.max_kl -= args.kl_adapt\n else:\n print(\"Policy is improving. Increase KL and decrease steps.\")\n if args.timesteps_per_batch > 1200:\n args.timesteps_per_batch -= args.timestep_adapt\n if args.max_kl < 0.01:\n args.max_kl += args.kl_adapt\n last_reward = recent_total_reward\n recent_total_reward = 0\n if args.decay_method == \"adaptive-margin\":\n if iteration % 10 == 0:\n scaled_last = last_reward + abs(last_reward * 0.05)\n print((\"Last reward: %f Scaled: %f Recent: %f\" % (last_reward, scaled_last, recent_total_reward)))\n if recent_total_reward < scaled_last:\n print(\"Policy is not improving. 
Decrease KL and increase steps.\")\n if args.timesteps_per_batch < 10000:\n args.timesteps_per_batch += args.timestep_adapt\n if args.max_kl > 0.001:\n args.max_kl -= args.kl_adapt\n else:\n print(\"Policy is improving. Increase KL and decrease steps.\")\n if args.timesteps_per_batch > 1200:\n args.timesteps_per_batch -= args.timestep_adapt\n if args.max_kl < 0.01:\n args.max_kl += args.kl_adapt\n last_reward = recent_total_reward\n recent_total_reward = 0\n # print((\"Current step number is \" + str(args.timesteps_per_batch) + \" and KL is \" + str(args.max_kl)))\n\n if iteration % 10 == 0:\n with open(\"Results/%s-%d-%f-%d\" % (args.task, starting_timesteps, starting_kl, comm_size), \"w\") as outfile:\n json.dump(history,outfile)\n learner.save_weights(\"{}-{}-{}-{}_{}.ckpt\".format(args.task, starting_timesteps, starting_kl, comm_size, iteration))\n\n # statbar.add(1, [('Iteration Time',iteration_time ), (\"Brodcast Time\", bcast_start),\n # (\"Rollout time\", rollout_time), (\"Gather Time\", gather_time),\n # (\"Learn time\", learn_time)] + list(stats.items()))\n\n totalsteps += stats[\"Timesteps\"]\n print((\"%d total steps have happened (Elapsed time = %.3f s)\" % (totalsteps,time.time() - start_time)))\n sys.stdout.flush()\n if iteration >= args.n_iter or totalsteps >= args.n_steps:\n isDone = 1\n else:\n new_policy_weights = None\n\n isDone = comm.bcast(isDone, root=0)\n\nif rank == 0:\n print((\"\\n----- Evaluation complete! -----\"))\n", "repo_name": "jacobperricone/238FinalProject", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 9184, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "73", "api": [{"api_name": "mpi4py.MPI.COMM_WORLD", "line_number": 15, "usage_type": "attribute"}, {"api_name": "mpi4py.MPI", "line_number": 15, "usage_type": "name"}, {"api_name": "os.environ", "line_number": 22, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 23, "usage_type": "call"}, {"api_name": "os.path", "line_number": 23, "usage_type": "attribute"}, {"api_name": "os.getcwd", "line_number": 23, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 24, "usage_type": "call"}, {"api_name": "os.path", "line_number": 24, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 25, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 27, "usage_type": "call"}, {"api_name": "gym.spec", "line_number": 45, "usage_type": "call"}, {"api_name": "sys.stdout.flush", "line_number": 48, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 48, "usage_type": "attribute"}, {"api_name": "gym.make", "line_number": 51, "usage_type": "call"}, {"api_name": "Agents.TRPOAgentDiscrete.TRPO", "line_number": 54, "usage_type": "call"}, {"api_name": "Agents.TRPOAgent.TRPO", "line_number": 56, "usage_type": "call"}, {"api_name": "time.time", "line_number": 63, "usage_type": "call"}, {"api_name": "logging.getLogger", "line_number": 87, "usage_type": "call"}, {"api_name": "logging.WARNING", "line_number": 87, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 93, "usage_type": "call"}, {"api_name": "time.time", "line_number": 96, "usage_type": "call"}, {"api_name": "time.time", "line_number": 99, "usage_type": "call"}, {"api_name": "time.time", "line_number": 101, "usage_type": "call"}, {"api_name": "time.time", "line_number": 104, "usage_type": "call"}, {"api_name": "time.time", "line_number": 106, "usage_type": "call"}, 
{"api_name": "time.time", "line_number": 110, "usage_type": "call"}, {"api_name": "time.time", "line_number": 114, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 151, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 151, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 155, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 156, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 157, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 158, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 159, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 201, "usage_type": "call"}, {"api_name": "time.time", "line_number": 209, "usage_type": "call"}, {"api_name": "sys.stdout.flush", "line_number": 210, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 210, "usage_type": "attribute"}]}
+{"seq_id": "73214229077", "text": "import asyncio as aio\nfrom utils import *\nfrom pl import PerfectLink\nfrom node_config import current_node, nodes, timeout\n\nclass BestEffortBroadcast():\n def __init__(self, upper_layer):\n self.pl = PerfectLink(self)\n self.upper_layer = upper_layer\n\n async def broadcast(self, message: Message):\n beb_m = Message(MessageType.BEB, current_node(), message)\n calls = []\n for node in nodes():\n calls.append(self.pl.send(beb_m, node, timeout))\n await aio.gather(*calls)\n\n async def deliver(self, message: Message):\n if message.type == MessageType.ACK:\n return await self.upper_layer.deliver(message) # if message is of type ACK let upper_layer handle it\n elif message.type != MessageType.BEB:\n raise ValueError('Best Effort Broadcast handles only messages with type ACK or BEB')\n await self.upper_layer.deliver(message.body)\n\n async def run(self):\n return await self.pl.run()\n", "repo_name": "bsmietanka/dist_algs", "sub_path": "beb.py", "file_name": "beb.py", "file_ext": "py", "file_size_in_byte": 984, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "85", "api": [{"api_name": "pl.PerfectLink", "line_number": 8, "usage_type": "call"}, {"api_name": "node_config.current_node", "line_number": 12, "usage_type": "call"}, {"api_name": "node_config.nodes", "line_number": 14, "usage_type": "call"}, {"api_name": "node_config.timeout", "line_number": 15, "usage_type": "argument"}, {"api_name": "asyncio.gather", "line_number": 16, "usage_type": "call"}]}
+{"seq_id": "33131381467", "text": "import os\nimport time\n\nimport stem\nimport stem.control\nimport stem.util.proc\nimport stem.util.str_tools\nimport stem.util.system\n\nimport nyx\nimport nyx.curses\nimport nyx.panel\nimport nyx.popups\nimport nyx.tracker\n\nfrom stem.util import conf, log\nfrom nyx import nyx_interface, tor_controller\n\nfrom nyx.curses import RED, GREEN, YELLOW, CYAN, WHITE, BOLD, HIGHLIGHT\n\nMIN_DUAL_COL_WIDTH = 141 # minimum width where we'll show two columns\nSHOW_FD_THRESHOLD = 60 # show file descriptor usage if usage is over this percentage\nUPDATE_RATE = 5 # rate in seconds at which we refresh\n\nCONFIG = conf.config_dict('nyx', {\n 'attr.flag_colors': {},\n 'attr.version_status_colors': {},\n})\n\n\nclass HeaderPanel(nyx.panel.DaemonPanel):\n \"\"\"\n Top area containing tor settings and system information.\n \"\"\"\n\n def __init__(self):\n nyx.panel.DaemonPanel.__init__(self, UPDATE_RATE)\n self._vals = Sampling.create()\n\n self._last_width = nyx.curses.screen_size().width\n self._reported_inactive = False\n self._pause_time = 0\n\n self._message = None\n self._message_attr = []\n\n tor_controller().add_status_listener(self._reset_listener)\n\n def show_message(self, message = None, *attr, **kwargs):\n \"\"\"\n Sets the message displayed at the bottom of the header. If not called with\n anything it clears the override.\n\n :param str message: message to be displayed\n :param list attr: text attributes to apply\n :param int max_wait: seconds to wait for user input, no limit if **None**\n\n :returns: :class:`~nyx.curses.KeyInput` user pressed if provided a\n **max_wait**, **None** otherwise or if prompt was canceled\n \"\"\"\n\n self._message = message\n self._message_attr = attr\n self.redraw()\n\n if 'max_wait' in kwargs:\n user_input = nyx.curses.key_input(kwargs['max_wait'])\n self.show_message() # clear override\n return user_input\n\n def is_wide(self):\n \"\"\"\n True if we should show two columns of information, False otherwise.\n \"\"\"\n\n return self._last_width >= MIN_DUAL_COL_WIDTH\n\n def get_height(self):\n \"\"\"\n Provides the height of the content, which is dynamically determined by the\n panel's maximum width.\n \"\"\"\n\n max_height = nyx.panel.DaemonPanel.get_height(self)\n\n if self._vals.is_relay:\n return min(max_height, 5 if self.is_wide() else 7)\n else:\n return min(max_height, 4 if self.is_wide() else 5)\n\n def send_newnym(self):\n \"\"\"\n Requests a new identity and provides a visual queue.\n \"\"\"\n\n controller = tor_controller()\n\n if not controller.is_newnym_available():\n return\n\n controller.signal(stem.Signal.NEWNYM)\n\n # If we're wide then the newnym label in this panel will give an\n # indication that the signal was sent. 
Otherwise use a msg.\n\n if not self.is_wide():\n self.show_message('Requesting a new identity', HIGHLIGHT, max_wait = 1)\n\n def set_paused(self, is_pause):\n if is_pause:\n self._pause_time = time.time()\n\n def key_handlers(self):\n def _reconnect():\n if self._vals.is_connected:\n return\n\n controller = tor_controller()\n self.show_message('Reconnecting...', HIGHLIGHT)\n\n try:\n try:\n controller.reconnect(chroot_path = nyx.chroot())\n except stem.connection.MissingPassword:\n password = nyx.input_prompt('Controller Password: ')\n\n if password:\n controller.authenticate(password)\n\n log.notice(\"Reconnected to Tor's control port\")\n self.show_message('Tor reconnected', HIGHLIGHT, max_wait = 1)\n except Exception as exc:\n self.show_message('Unable to reconnect (%s)' % exc, HIGHLIGHT, max_wait = 3)\n controller.close()\n\n return (\n nyx.panel.KeyHandler('n', action = self.send_newnym),\n nyx.panel.KeyHandler('r', action = _reconnect),\n )\n\n def _draw(self, subwindow):\n vals = self._vals # local reference to avoid concurrency concerns\n self._last_width = subwindow.width\n is_wide = self.is_wide()\n\n # space available for content\n\n interface = nyx_interface()\n left_width = max(subwindow.width // 2, 77) if is_wide else subwindow.width\n right_width = subwindow.width - left_width\n\n _draw_platform_section(subwindow, 0, 0, left_width, vals)\n\n if vals.is_connected:\n _draw_ports_section(subwindow, 0, 1, left_width, vals)\n else:\n _draw_disconnected(subwindow, 0, 1, vals.last_heartbeat)\n\n if is_wide:\n _draw_resource_usage(subwindow, left_width, 0, right_width, vals, self._pause_time)\n\n if vals.is_relay:\n _draw_fingerprint_and_fd_usage(subwindow, left_width, 1, right_width, vals)\n _draw_flags(subwindow, 0, 2, vals.flags)\n _draw_exit_policy(subwindow, left_width, 2, vals.exit_policy)\n elif vals.is_connected:\n _draw_newnym_option(subwindow, left_width, 1, vals.newnym_wait)\n else:\n _draw_resource_usage(subwindow, 0, 2, left_width, vals, self._pause_time)\n\n if vals.is_relay:\n _draw_fingerprint_and_fd_usage(subwindow, 0, 3, left_width, vals)\n _draw_flags(subwindow, 0, 4, vals.flags)\n\n _draw_status(subwindow, 0, self.get_height() - 1, interface.is_paused(), self._message, *self._message_attr)\n\n def _reset_listener(self, controller, event_type, _):\n self._update()\n\n if event_type == stem.control.State.CLOSED:\n log.notice('Tor control port closed')\n\n def _update(self):\n self._vals = Sampling.create(self._vals)\n\n if self._vals.fd_used and self._vals.fd_limit != -1:\n fd_percent = 100 * self._vals.fd_used // self._vals.fd_limit\n\n if fd_percent >= 90:\n log_msg = \"Tor's file descriptor usage is at %s%%. 
If you run out Tor will be unable to continue functioning.\" % fd_percent\n log.log_once('fd_used_at_ninety_percent', log.WARN, log_msg)\n log.DEDUPLICATION_MESSAGE_IDS.add('fd_used_at_sixty_percent')\n elif fd_percent >= 60:\n log_msg = \"Tor's file descriptor usage is at %s%%.\" % fd_percent\n log.log_once('fd_used_at_sixty_percent', log.NOTICE, log_msg)\n\n if self._vals.is_connected:\n if not self._reported_inactive and (time.time() - self._vals.last_heartbeat) >= 10:\n self._reported_inactive = True\n log.notice('Relay unresponsive (last heartbeat: %s)' % time.ctime(self._vals.last_heartbeat))\n elif self._reported_inactive and (time.time() - self._vals.last_heartbeat) < 10:\n self._reported_inactive = False\n log.notice('Relay resumed')\n\n self.redraw()\n\n\nclass Sampling(object):\n def __init__(self, **attr):\n self._attr = attr\n\n for key, value in attr.items():\n setattr(self, key, value)\n\n @staticmethod\n def create(last_sampling = None):\n controller = tor_controller()\n retrieved = time.time()\n\n pid = controller.get_pid('')\n tor_resources = nyx.tracker.get_resource_tracker().get_value()\n nyx_total_cpu_time = sum(os.times()[:3], stem.util.system.SYSTEM_CALL_TIME)\n\n or_listeners = controller.get_listeners(stem.control.Listener.OR, [])\n control_listeners = controller.get_listeners(stem.control.Listener.CONTROL, [])\n my_router_status_entry = nyx.tracker.get_consensus_tracker().my_router_status_entry()\n\n if controller.get_conf('HashedControlPassword', None):\n auth_type = 'password'\n elif controller.get_conf('CookieAuthentication', None) == '1':\n auth_type = 'cookie'\n else:\n auth_type = 'open'\n\n try:\n fd_used = stem.util.proc.file_descriptors_used(pid)\n except IOError:\n fd_used = None\n\n if last_sampling:\n nyx_cpu_delta = nyx_total_cpu_time - last_sampling.nyx_total_cpu_time\n nyx_time_delta = retrieved - last_sampling.retrieved\n nyx_cpu = nyx_cpu_delta / nyx_time_delta\n else:\n nyx_cpu = 0.0\n\n attr = {\n 'retrieved': retrieved,\n 'is_connected': controller.is_alive(),\n 'connection_time': controller.connection_time(),\n 'last_heartbeat': controller.get_latest_heartbeat(),\n\n 'fingerprint': controller.get_info('fingerprint', 'Unknown'),\n 'nickname': controller.get_conf('Nickname', ''),\n 'newnym_wait': controller.get_newnym_wait(),\n 'exit_policy': controller.get_exit_policy(None),\n 'flags': getattr(my_router_status_entry, 'flags', []),\n\n 'version': str(controller.get_version('Unknown')).split()[0],\n 'version_status': controller.get_info('status/version/current', 'Unknown'),\n\n 'address': or_listeners[0][0] if (or_listeners and or_listeners[0][0] != '0.0.0.0') else controller.get_info('address', 'Unknown'),\n 'or_port': or_listeners[0][1] if or_listeners else '',\n 'dir_port': controller.get_conf('DirPort', '0'),\n 'control_port': str(control_listeners[0][1]) if control_listeners else None,\n 'socket_path': controller.get_conf('ControlSocket', None),\n 'is_relay': bool(or_listeners),\n\n 'auth_type': auth_type,\n 'pid': pid,\n 'start_time': controller.get_start_time(0),\n 'fd_limit': int(controller.get_info('process/descriptor-limit', '-1')),\n 'fd_used': fd_used,\n\n 'nyx_total_cpu_time': nyx_total_cpu_time,\n 'tor_cpu': '%0.1f' % (100 * tor_resources.cpu_sample),\n 'nyx_cpu': '%0.1f' % (100 * nyx_cpu),\n 'memory': stem.util.str_tools.size_label(tor_resources.memory_bytes) if tor_resources.memory_bytes > 0 else 0,\n 'memory_percent': '%0.1f' % (100 * tor_resources.memory_percent),\n\n 'hostname': os.uname()[1],\n 'platform': '%s %s' % 
(os.uname()[0], os.uname()[2]), # [platform name] [version]\n }\n\n return Sampling(**attr)\n\n def format(self, message, crop_width = None):\n formatted_msg = message.format(**self._attr)\n\n if crop_width is not None:\n formatted_msg = stem.util.str_tools.crop(formatted_msg, crop_width)\n\n return formatted_msg\n\n\ndef _draw_platform_section(subwindow, x, y, width, vals):\n \"\"\"\n Section providing the user's hostname, platform, and version information...\n\n nyx - odin (Linux 3.5.0-52-generic) Tor 0.2.5.1-alpha-dev (unrecommended)\n |------ platform (40 characters) ------| |----------- tor version -----------|\n \"\"\"\n\n initial_x, space_left = x, min(width, 40)\n\n x = subwindow.addstr(x, y, vals.format('nyx - {hostname}', space_left))\n space_left -= x - initial_x\n\n if space_left >= 10:\n subwindow.addstr(x, y, ' (%s)' % vals.format('{platform}', space_left - 3))\n\n x, space_left = initial_x + 43, width - 43\n\n if vals.version != 'Unknown' and space_left >= 10:\n x = subwindow.addstr(x, y, vals.format('Tor {version}', space_left))\n space_left -= x - 43 - initial_x\n\n if space_left >= 7 + len(vals.version_status):\n version_color = CONFIG['attr.version_status_colors'].get(vals.version_status, WHITE)\n\n x = subwindow.addstr(x, y, ' (')\n x = subwindow.addstr(x, y, vals.version_status, version_color)\n subwindow.addstr(x, y, ')')\n\n\ndef _draw_ports_section(subwindow, x, y, width, vals):\n \"\"\"\n Section providing our nickname, address, and port information...\n\n Unnamed - 0.0.0.0:7000, Control Port (cookie): 9051\n \"\"\"\n\n if not vals.is_relay:\n x = subwindow.addstr(x, y, 'Relaying Disabled', CYAN)\n else:\n x = subwindow.addstr(x, y, vals.format('{nickname} - {address}:{or_port}'))\n\n if vals.dir_port != '0':\n x = subwindow.addstr(x, y, vals.format(', Dir Port: {dir_port}'))\n\n if vals.control_port:\n if width >= x + 19 + len(vals.control_port) + len(vals.auth_type):\n auth_color = RED if vals.auth_type == 'open' else GREEN\n\n x = subwindow.addstr(x, y, ', Control Port (')\n x = subwindow.addstr(x, y, vals.auth_type, auth_color)\n subwindow.addstr(x, y, vals.format('): {control_port}'))\n else:\n subwindow.addstr(x, y, vals.format(', Control Port: {control_port}'))\n elif vals.socket_path:\n subwindow.addstr(x, y, vals.format(', Control Socket: {socket_path}'))\n\n\ndef _draw_disconnected(subwindow, x, y, last_heartbeat):\n \"\"\"\n Message indicating that tor is disconnected...\n\n Tor Disconnected (15:21 07/13/2014, press r to reconnect)\n \"\"\"\n\n x = subwindow.addstr(x, y, 'Tor Disconnected', RED, BOLD)\n last_heartbeat_str = time.strftime('%H:%M %m/%d/%Y', time.localtime(last_heartbeat))\n subwindow.addstr(x, y, ' (%s, press r to reconnect)' % last_heartbeat_str)\n\n\ndef _draw_resource_usage(subwindow, x, y, width, vals, pause_time):\n \"\"\"\n System resource usage of the tor process...\n\n cpu: 0.0% tor, 1.0% nyx mem: 0 (0.0%) pid: 16329 uptime: 12-20:42:07\n \"\"\"\n\n if vals.start_time:\n if not vals.is_connected:\n now = vals.connection_time\n elif pause_time:\n now = pause_time\n else:\n now = time.time()\n\n uptime = stem.util.str_tools.short_time_label(max(0, now - vals.start_time))\n else:\n uptime = ''\n\n sys_fields = (\n (0, vals.format('cpu: {tor_cpu}% tor, {nyx_cpu}% nyx')),\n (27, vals.format('mem: {memory} ({memory_percent}%)')),\n (47, vals.format('pid: {pid}')),\n (59, 'uptime: %s' % uptime),\n )\n\n for (start, label) in sys_fields:\n if width >= start + len(label):\n subwindow.addstr(x + start, y, label)\n else:\n break\n\n\ndef 
_draw_fingerprint_and_fd_usage(subwindow, x, y, width, vals):\n \"\"\"\n Presents our fingerprint, and our file descriptor usage if we're running\n out...\n\n fingerprint: 1A94D1A794FCB2F8B6CBC179EF8FDD4008A98D3B, file desc: 900 / 1000 (90%)\n \"\"\"\n\n initial_x, space_left = x, width\n\n x = subwindow.addstr(x, y, vals.format('fingerprint: {fingerprint}', width))\n space_left -= x - initial_x\n\n if space_left >= 30 and vals.fd_used and vals.fd_limit != -1:\n fd_percent = 100 * vals.fd_used / vals.fd_limit\n\n if fd_percent >= SHOW_FD_THRESHOLD:\n if fd_percent >= 95:\n percentage_format = (RED, BOLD)\n elif fd_percent >= 90:\n percentage_format = (RED,)\n elif fd_percent >= 60:\n percentage_format = (YELLOW,)\n else:\n percentage_format = ()\n\n x = subwindow.addstr(x, y, ', file descriptors' if space_left >= 37 else ', file desc')\n x = subwindow.addstr(x, y, vals.format(': {fd_used} / {fd_limit} ('))\n x = subwindow.addstr(x, y, '%i%%' % fd_percent, *percentage_format)\n subwindow.addstr(x, y, ')')\n\n\ndef _draw_flags(subwindow, x, y, flags):\n \"\"\"\n Presents flags held by our relay...\n\n flags: Running, Valid\n \"\"\"\n\n x = subwindow.addstr(x, y, 'flags: ')\n\n if flags:\n for i, flag in enumerate(flags):\n flag_color = CONFIG['attr.flag_colors'].get(flag, WHITE)\n x = subwindow.addstr(x, y, flag, flag_color, BOLD)\n\n if i < len(flags) - 1:\n x = subwindow.addstr(x, y, ', ')\n else:\n subwindow.addstr(x, y, 'none', CYAN, BOLD)\n\n\ndef _draw_exit_policy(subwindow, x, y, exit_policy):\n \"\"\"\n Presents our exit policy...\n\n exit policy: reject *:*\n \"\"\"\n\n x = subwindow.addstr(x, y, 'exit policy: ')\n\n if not exit_policy:\n return\n\n rules = list(exit_policy.strip_private().strip_default())\n\n for i, rule in enumerate(rules):\n policy_color = GREEN if rule.is_accept else RED\n x = subwindow.addstr(x, y, str(rule), policy_color, BOLD)\n\n if i < len(rules) - 1:\n x = subwindow.addstr(x, y, ', ')\n\n if exit_policy.has_default():\n if rules:\n x = subwindow.addstr(x, y, ', ')\n\n subwindow.addstr(x, y, '<default>', CYAN, BOLD)\n\n\ndef _draw_newnym_option(subwindow, x, y, newnym_wait):\n \"\"\"\n Provide a notice for requesting a new identity, and time until it's next\n available if in the process of building circuits.\n \"\"\"\n\n if newnym_wait == 0:\n subwindow.addstr(x, y, \"press 'n' for a new identity\")\n else:\n plural = 's' if newnym_wait > 1 else ''\n subwindow.addstr(x, y, 'building circuits, available again in %i second%s' % (newnym_wait, plural))\n\n\ndef _draw_status(subwindow, x, y, is_paused, message, *attr):\n \"\"\"\n Provides general usage information or a custom message.\n \"\"\"\n\n if message:\n subwindow.addstr(x, y, message, *attr)\n elif not is_paused:\n interface = nyx_interface()\n subwindow.addstr(x, y, 'page %i / %i - m: menu, p: pause, h: page help, q: quit' % (interface.get_page() + 1, interface.page_count()))\n else:\n subwindow.addstr(x, y, 'Paused', HIGHLIGHT)\n", "repo_name": "torproject/nyx", "sub_path": "nyx/panel/header.py", "file_name": "header.py", "file_ext": "py", "file_size_in_byte": 16082, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 102, "dataset": "github-code", "pt": "73", "api": [{"api_name": "stem.util.conf.config_dict", "line_number": 25, "usage_type": "call"}, {"api_name": "stem.util.conf", "line_number": 25, "usage_type": "name"}, {"api_name": "nyx.panel", "line_number": 31, "usage_type": "attribute"}, {"api_name": "nyx.panel.DaemonPanel.__init__", "line_number": 37, "usage_type": "call"}, 
{"api_name": "nyx.panel", "line_number": 37, "usage_type": "attribute"}, {"api_name": "nyx.curses.screen_size", "line_number": 40, "usage_type": "call"}, {"api_name": "nyx.curses", "line_number": 40, "usage_type": "attribute"}, {"api_name": "nyx.tor_controller", "line_number": 47, "usage_type": "call"}, {"api_name": "nyx.curses.key_input", "line_number": 67, "usage_type": "call"}, {"api_name": "nyx.curses", "line_number": 67, "usage_type": "attribute"}, {"api_name": "nyx.panel.DaemonPanel.get_height", "line_number": 84, "usage_type": "call"}, {"api_name": "nyx.panel", "line_number": 84, "usage_type": "attribute"}, {"api_name": "nyx.tor_controller", "line_number": 96, "usage_type": "call"}, {"api_name": "stem.Signal", "line_number": 101, "usage_type": "attribute"}, {"api_name": "nyx.curses.HIGHLIGHT", "line_number": 107, "usage_type": "argument"}, {"api_name": "time.time", "line_number": 111, "usage_type": "call"}, {"api_name": "nyx.tor_controller", "line_number": 118, "usage_type": "call"}, {"api_name": "nyx.curses.HIGHLIGHT", "line_number": 119, "usage_type": "argument"}, {"api_name": "nyx.chroot", "line_number": 123, "usage_type": "call"}, {"api_name": "stem.connection", "line_number": 124, "usage_type": "attribute"}, {"api_name": "nyx.input_prompt", "line_number": 125, "usage_type": "call"}, {"api_name": "stem.util.log.notice", "line_number": 130, "usage_type": "call"}, {"api_name": "stem.util.log", "line_number": 130, "usage_type": "name"}, {"api_name": "nyx.curses.HIGHLIGHT", "line_number": 131, "usage_type": "argument"}, {"api_name": "nyx.curses.HIGHLIGHT", "line_number": 133, "usage_type": "argument"}, {"api_name": "nyx.panel.KeyHandler", "line_number": 137, "usage_type": "call"}, {"api_name": "nyx.panel", "line_number": 137, "usage_type": "attribute"}, {"api_name": "nyx.panel.KeyHandler", "line_number": 138, "usage_type": "call"}, {"api_name": "nyx.panel", "line_number": 138, "usage_type": "attribute"}, {"api_name": "nyx.nyx_interface", "line_number": 148, "usage_type": "call"}, {"api_name": "stem.control", "line_number": 180, "usage_type": "attribute"}, {"api_name": "stem.util.log.notice", "line_number": 181, "usage_type": "call"}, {"api_name": "stem.util.log", "line_number": 181, "usage_type": "name"}, {"api_name": "stem.util.log.log_once", "line_number": 191, "usage_type": "call"}, {"api_name": "stem.util.log", "line_number": 191, "usage_type": "name"}, {"api_name": "stem.util.log.WARN", "line_number": 191, "usage_type": "attribute"}, {"api_name": "stem.util.log.DEDUPLICATION_MESSAGE_IDS.add", "line_number": 192, "usage_type": "call"}, {"api_name": "stem.util.log.DEDUPLICATION_MESSAGE_IDS", "line_number": 192, "usage_type": "attribute"}, {"api_name": "stem.util.log", "line_number": 192, "usage_type": "name"}, {"api_name": "stem.util.log.log_once", "line_number": 195, "usage_type": "call"}, {"api_name": "stem.util.log", "line_number": 195, "usage_type": "name"}, {"api_name": "stem.util.log.NOTICE", "line_number": 195, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 198, "usage_type": "call"}, {"api_name": "stem.util.log.notice", "line_number": 200, "usage_type": "call"}, {"api_name": "stem.util.log", "line_number": 200, "usage_type": "name"}, {"api_name": "time.ctime", "line_number": 200, "usage_type": "call"}, {"api_name": "time.time", "line_number": 201, "usage_type": "call"}, {"api_name": "stem.util.log.notice", "line_number": 203, "usage_type": "call"}, {"api_name": "stem.util.log", "line_number": 203, "usage_type": "name"}, {"api_name": 
"nyx.tor_controller", "line_number": 217, "usage_type": "call"}, {"api_name": "time.time", "line_number": 218, "usage_type": "call"}, {"api_name": "nyx.tracker.get_resource_tracker", "line_number": 221, "usage_type": "call"}, {"api_name": "nyx.tracker", "line_number": 221, "usage_type": "attribute"}, {"api_name": "os.times", "line_number": 222, "usage_type": "call"}, {"api_name": "stem.util", "line_number": 222, "usage_type": "attribute"}, {"api_name": "stem.control", "line_number": 224, "usage_type": "attribute"}, {"api_name": "stem.control", "line_number": 225, "usage_type": "attribute"}, {"api_name": "nyx.tracker.get_consensus_tracker", "line_number": 226, "usage_type": "call"}, {"api_name": "nyx.tracker", "line_number": 226, "usage_type": "attribute"}, {"api_name": "stem.util.proc.file_descriptors_used", "line_number": 236, "usage_type": "call"}, {"api_name": "stem.util", "line_number": 236, "usage_type": "attribute"}, {"api_name": "stem.util.str_tools.size_label", "line_number": 278, "usage_type": "call"}, {"api_name": "stem.util", "line_number": 278, "usage_type": "attribute"}, {"api_name": "os.uname", "line_number": 281, "usage_type": "call"}, {"api_name": "os.uname", "line_number": 282, "usage_type": "call"}, {"api_name": "stem.util.str_tools.crop", "line_number": 291, "usage_type": "call"}, {"api_name": "stem.util", "line_number": 291, "usage_type": "attribute"}, {"api_name": "nyx.curses.WHITE", "line_number": 319, "usage_type": "argument"}, {"api_name": "nyx.curses.CYAN", "line_number": 334, "usage_type": "argument"}, {"api_name": "nyx.curses.RED", "line_number": 343, "usage_type": "name"}, {"api_name": "nyx.curses.GREEN", "line_number": 343, "usage_type": "name"}, {"api_name": "nyx.curses.RED", "line_number": 361, "usage_type": "argument"}, {"api_name": "nyx.curses.BOLD", "line_number": 361, "usage_type": "argument"}, {"api_name": "time.strftime", "line_number": 362, "usage_type": "call"}, {"api_name": "time.localtime", "line_number": 362, "usage_type": "call"}, {"api_name": "time.time", "line_number": 379, "usage_type": "call"}, {"api_name": "stem.util.str_tools.short_time_label", "line_number": 381, "usage_type": "call"}, {"api_name": "stem.util", "line_number": 381, "usage_type": "attribute"}, {"api_name": "nyx.curses.RED", "line_number": 417, "usage_type": "name"}, {"api_name": "nyx.curses.BOLD", "line_number": 417, "usage_type": "name"}, {"api_name": "nyx.curses.RED", "line_number": 419, "usage_type": "name"}, {"api_name": "nyx.curses.YELLOW", "line_number": 421, "usage_type": "name"}, {"api_name": "nyx.curses.WHITE", "line_number": 442, "usage_type": "argument"}, {"api_name": "nyx.curses.BOLD", "line_number": 443, "usage_type": "argument"}, {"api_name": "nyx.curses.CYAN", "line_number": 448, "usage_type": "argument"}, {"api_name": "nyx.curses.BOLD", "line_number": 448, "usage_type": "argument"}, {"api_name": "nyx.curses.GREEN", "line_number": 466, "usage_type": "name"}, {"api_name": "nyx.curses.RED", "line_number": 466, "usage_type": "name"}, {"api_name": "nyx.curses.BOLD", "line_number": 467, "usage_type": "argument"}, {"api_name": "nyx.curses.CYAN", "line_number": 476, "usage_type": "argument"}, {"api_name": "nyx.curses.BOLD", "line_number": 476, "usage_type": "argument"}, {"api_name": "nyx.nyx_interface", "line_number": 500, "usage_type": "call"}, {"api_name": "nyx.curses.HIGHLIGHT", "line_number": 503, "usage_type": "argument"}]}
+{"seq_id": "20748442676", "text": "import math\nimport sys\nimport time\n\nimport torch\n\nfrom ..utils import utils\nfrom ..utils.metric_logger import MetricLogger, SmoothedValue\n\n\ndef train_one_epoch(model, optimizer, data_loader, device, epoch, print_freq, writer):\n model.train()\n metric_logger = MetricLogger(delimiter=\" \", writer=writer)\n metric_logger.add_meter('lr', SmoothedValue(window_size=1, fmt='{value:.6f}'))\n header = 'Epoch: [{}]'.format(epoch)\n\n epoch_results = {}\n\n lr_scheduler = None\n if epoch == 0:\n warmup_factor = 1. / 1000\n warmup_iters = min(1000, len(data_loader) - 1)\n\n lr_scheduler = utils.warmup_lr_scheduler(optimizer, warmup_iters, warmup_factor)\n\n for images, targets in metric_logger.log_every(data_loader, print_freq, epoch, header):\n images = list(image.to(device) for image in images)\n targets = [{k: v.to(device) for k, v in t.items()} for t in targets]\n\n loss_dict = model(images, targets)\n\n losses = sum(loss for loss in loss_dict.values())\n\n # reduce losses over all GPUs for logging purposes\n loss_dict_reduced = utils.reduce_dict(loss_dict)\n losses_reduced = sum(loss for loss in loss_dict_reduced.values())\n\n loss_value = losses_reduced.item()\n\n if not math.isfinite(loss_value):\n print(\"Loss is {}, stopping training:\\n{}\".format(loss_value, loss_dict))\n sys.exit(1)\n\n optimizer.zero_grad()\n losses.backward()\n optimizer.step()\n\n if lr_scheduler is not None:\n lr_scheduler.step()\n\n metric_logger.update(loss=losses_reduced, **loss_dict_reduced)\n metric_logger.update(lr=optimizer.param_groups[0][\"lr\"])\n\n\n@torch.no_grad()\ndef evaluate(model, data_loader, device, writer, epoch, threshold=0.5):\n n_threads = torch.get_num_threads()\n # FIXME remove this and make paste_masks_in_image run on the GPU\n torch.set_num_threads(1)\n cpu_device = torch.device(\"cpu\")\n model.eval()\n metric_logger = MetricLogger(delimiter=\" \", writer=writer)\n header = 'Test:'\n\n total, correct = 0, 0\n\n for image, targets in metric_logger.log_every(data_loader, 50, epoch=epoch, header=header):\n image = list(img.to(device) for img in image)\n targets = [{k: v.to(device) for k, v in t.items()} for t in targets]\n targets_labels = torch.as_tensor([int(1 in target[\"labels\"]) for target in targets], dtype=torch.int8)\n\n torch.cuda.synchronize()\n model_time = time.time()\n outputs = model(image)\n\n outputs = [{k: v.to(cpu_device) for k, v in t.items()} for t in outputs]\n # Filter score only superior as threshold=0.5\n outputs_filtred = []\n for output in outputs:\n output[\"labels\"] = output[\"labels\"][output[\"scores\"] >= threshold]\n # output[\"scores\"] = output[\"scores\"][output[\"scores\"] >= threshold]\n if 1 in output[\"labels\"]:\n outputs_filtred.append(1)\n\n outputs_filtred = torch.as_tensor(outputs_filtred, dtype=torch.int8)\n model_time = time.time() - model_time\n\n total += len(image)\n correct += (targets_labels == outputs_filtred).sum().item()\n\n res = {target[\"image_id\"].item(): output for target, output in zip(targets, outputs)}\n metric_logger.update(model_time=model_time)\n\n print(\"Test accuracy :\", correct / total)\n\n # gather the stats from all processes\n metric_logger.synchronize_between_processes()\n print(\"Averaged stats:\", metric_logger)\n\n torch.set_num_threads(n_threads)\n writer.add_scalar(\"Accuracy/eval\", correct / total, epoch)\n\n\nimport numpy as np\n\n\ndef get_model_scores(pred_boxes):\n \"\"\"Creates a dictionary of from model_scores to image ids.\n Args:\n pred_boxes (dict): dict 
of dicts of 'boxes' and 'scores'\n Returns:\n dict: keys are model_scores and values are image ids (usually filenames)\n \"\"\"\n model_score = {}\n for img_id, val in pred_boxes.items():\n for score in val['scores']:\n if score not in model_score.keys():\n model_score[score] = [img_id]\n else:\n model_score[score].append(img_id)\n return model_score\n\n\ndef iou_pytorch(outputs: torch.Tensor, labels: torch.Tensor):\n SMOOTH = 1e-6\n # You can comment out this line if you are passing tensors of equal shape\n # But if you are passing output from UNet or something it will most probably\n # be with the BATCH x 1 x H x W shape\n outputs = outputs.squeeze(1) # BATCH x 1 x H x W => BATCH x H x W\n intersection = (outputs & labels).float().sum((1, 2)) # Will be zero if Truth=0 or Prediction=0\n union = (outputs | labels).float().sum((1, 2)) # Will be zero if both are 0\n iou = (intersection + SMOOTH) / (union + SMOOTH) # We smooth our division to avoid 0/0\n thresholded = torch.clamp(20 * (iou - 0.5), 0, 10).ceil() / 10 # This is equal to comparing with thresholds\n return thresholded # Or thresholded.mean() if you are interested in average across the batch\n\n\ndef calc_precision_recall(image_results):\n \"\"\"Calculates precision and recall from the set of images\n Args:\n img_results (dict): dictionary formatted like:\n {\n 'img_id1': {'true_positive': int, 'false_positive': int, 'false_negative': int},\n 'img_id2': ...\n ...\n }\n Returns:\n tuple: of floats of (precision, recall)\n \"\"\"\n tp, fp, fn = 0, 0, 0\n precision, recall = 0, 0\n for img_id, res in image_results.items():\n tp += res['true_positive']\n fp += res['false_positive']\n fn += res['false_negative']\n try:\n precision = tp / (tp + fp)\n except ZeroDivisionError:\n precision = 0.0\n try:\n recall = tp / (tp + fn)\n except ZeroDivisionError:\n recall = 0.0\n return precision, recall\n\n\ndef get_single_image_results(gt_boxes, pred_boxes, iou_thr):\n \"\"\"Calculates number of true_pos, false_pos, false_neg from single batch of boxes.\n Args:\n gt_boxes (list of list of floats): list of locations of ground truth\n objects as [xmin, ymin, xmax, ymax]\n pred_boxes (list of list of floats): list of predicted boxes\n (formatted like `gt_boxes`)\n iou_thr (float): value of IoU to consider as threshold for a\n true prediction.\n Returns:\n dict: true positives (int), false positives (int), false negatives (int)\n \"\"\"\n all_pred_indices = range(len(pred_boxes))\n all_gt_indices = range(len(gt_boxes))\n if len(all_pred_indices) == 0:\n tp = 0\n fp = 0\n fn = 0\n return {'true_positive': tp, 'false_positive': fp, 'false_negative': fn}\n if len(all_gt_indices) == 0:\n tp = 0\n fp = 0\n fn = 0\n return {'true_positive': tp, 'false_positive': fp, 'false_negative': fn}\n\n gt_idx_thr = []\n pred_idx_thr = []\n ious = []\n for ipb, pred_box in enumerate(pred_boxes):\n for igb, gt_box in enumerate(gt_boxes):\n iou = calc_iou(gt_box, pred_box)\n\n if iou > iou_thr:\n gt_idx_thr.append(igb)\n pred_idx_thr.append(ipb)\n ious.append(iou)\n iou_sort = np.argsort(ious)[::-1] # descending, so the highest-IoU pairs are matched first\n if len(iou_sort) == 0:\n tp = 0\n fp = 0\n fn = 0\n return {'true_positive': tp, 'false_positive': fp, 'false_negative': fn}\n else:\n gt_match_idx = []\n pred_match_idx = []\n for idx in iou_sort:\n gt_idx = gt_idx_thr[idx]\n pr_idx = pred_idx_thr[idx]\n # If the boxes are unmatched, add them to matches\n if (gt_idx not in gt_match_idx) and (pr_idx not in pred_match_idx):\n gt_match_idx.append(gt_idx)\n pred_match_idx.append(pr_idx)\n tp = len(gt_match_idx)\n fp = len(pred_boxes) - len(pred_match_idx)\n fn = len(gt_boxes) - len(gt_match_idx)\n return {'true_positive': tp, 'false_positive': fp, 'false_negative': fn}\n\n\ndef get_avg_precision_at_iou(gt_boxes, pred_bb, iou_thr=0.5):\n model_scores = get_model_scores(pred_bb)\n sorted_model_scores = sorted(model_scores.keys())\n # Sort each image's predicted boxes in ascending score order (lowest scoring boxes first):\n for img_id in pred_bb.keys():\n arg_sort = np.argsort(pred_bb[img_id]['scores'])\n pred_bb[img_id]['scores'] = np.array(pred_bb[img_id]['scores'])[arg_sort].tolist()\n pred_bb[img_id]['boxes'] = np.array(pred_bb[img_id]['boxes'])[arg_sort].tolist()\n\n pred_boxes_pruned = deepcopy(pred_bb)\n\n precisions = []\n recalls = []\n model_thrs = []\n img_results = {}\n\n # Loop over model score thresholds and calculate precision, recall\n for ithr, model_score_thr in enumerate(sorted_model_scores[:-1]):\n # On first iteration, define img_results for the first time:\n print(\"Model score threshold: \", model_score_thr)\n img_ids = gt_boxes.keys() if ithr == 0 else model_scores[model_score_thr]\n for img_id in img_ids:\n\n gt_boxes_img = gt_boxes[img_id]\n box_scores = pred_boxes_pruned[img_id]['scores']\n start_idx = 0\n for score in box_scores:\n if score <= model_score_thr:\n start_idx += 1\n else:\n break\n # Remove boxes, scores of lower than threshold scores:\n pred_boxes_pruned[img_id]['scores'] = pred_boxes_pruned[img_id]['scores'][start_idx:]\n pred_boxes_pruned[img_id]['boxes'] = pred_boxes_pruned[img_id]['boxes'][start_idx:]\n # Recalculate image results for this image\n print(img_id)\n img_results[img_id] = get_single_image_results(gt_boxes_img, pred_boxes_pruned[img_id]['boxes'], iou_thr=iou_thr)\n # calculate precision and recall\n prec, rec = calc_precision_recall(img_results)\n precisions.append(prec)\n recalls.append(rec)\n model_thrs.append(model_score_thr)\n precisions = np.array(precisions)\n recalls = np.array(recalls)\n prec_at_rec = []\n for recall_level in np.linspace(0.0, 1.0, 11):\n try:\n args = np.argwhere(recalls > recall_level).flatten()\n prec = max(precisions[args])\n print(recalls, \"Recall\")\n print(recall_level, \"Recall Level\")\n print(args, \"Args\")\n print(prec, \"precision\")\n except ValueError:\n prec = 0.0\n prec_at_rec.append(prec)\n avg_prec = np.mean(prec_at_rec)\n return {\n 'avg_prec': avg_prec,\n 'precisions': precisions,\n 'recalls': recalls,\n 'model_thrs': model_thrs}\n", "repo_name": "yohann84L/faster_rcnn_test_case", "sub_path": "src/model/engine.py", "file_name": "engine.py", "file_ext": "py", "file_size_in_byte": 10609, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "73", "api": [{"api_name": "utils.metric_logger.MetricLogger", "line_number": 13, "usage_type": "call"}, {"api_name": "utils.metric_logger.SmoothedValue", "line_number": 14, "usage_type": "call"}, {"api_name": "utils.utils.warmup_lr_scheduler", "line_number": 24, "usage_type": "call"}, {"api_name": "utils.utils", "line_number": 24, "usage_type": "name"}, {"api_name": "utils.utils.reduce_dict", "line_number": 35, "usage_type": "call"}, {"api_name": "utils.utils", "line_number": 35, "usage_type": "name"}, {"api_name": "math.isfinite", "line_number": 40, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 42, "usage_type": "call"}, {"api_name": "torch.get_num_threads", "line_number": 57, "usage_type": "call"}, {"api_name": "torch.set_num_threads", "line_number": 59, "usage_type": "call"}, {"api_name": "torch.device", "line_number": 60, "usage_type": "call"}, {"api_name": 
"utils.metric_logger.MetricLogger", "line_number": 62, "usage_type": "call"}, {"api_name": "torch.as_tensor", "line_number": 70, "usage_type": "call"}, {"api_name": "torch.int8", "line_number": 70, "usage_type": "attribute"}, {"api_name": "torch.cuda.synchronize", "line_number": 72, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 72, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 73, "usage_type": "call"}, {"api_name": "torch.as_tensor", "line_number": 85, "usage_type": "call"}, {"api_name": "torch.int8", "line_number": 85, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 86, "usage_type": "call"}, {"api_name": "torch.set_num_threads", "line_number": 100, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 55, "usage_type": "call"}, {"api_name": "torch.Tensor", "line_number": 124, "usage_type": "attribute"}, {"api_name": "torch.clamp", "line_number": 133, "usage_type": "call"}, {"api_name": "numpy.argsort", "line_number": 202, "usage_type": "call"}, {"api_name": "numpy.argsort", "line_number": 229, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 230, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 231, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 267, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 268, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 270, "usage_type": "call"}, {"api_name": "numpy.argwhere", "line_number": 272, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 281, "usage_type": "call"}]}
+{"seq_id": "21538815712", "text": "import json\nfrom shapely.geometry import Point\nfrom shapely.geometry.polygon import Polygon\n\n\ngeodict = {}\ndef initialize():\n global geodict\n with open('./data/melbourne.geojson') as json_file:\n data = json.load(json_file)\n for p in data['features']:\n id = p['id']\n name = p['properties']['SA2_NAME16']\n geodict[id] = {}\n geodict[id]['name'] = name\n if p['geometry']['type'] == 'Polygon':\n geodict[id]['type'] = 'polygon'\n coorlists = p['geometry']['coordinates'][0]\n polygonlist = []\n for coor in coorlists:\n polygonlist.append((coor[0],coor[1]))\n polygon = Polygon(polygonlist)\n geodict[id]['polygon'] = polygon\n else:\n geodict[id]['type'] = 'MultiPolygon'\n geodict[id]['polygons'] = []\n for polyCoors in p['geometry']['coordinates']:\n polygonlist = []\n for coor in polyCoors[0]:\n polygonlist.append((coor[0],coor[1]))\n geodict[id]['polygons'].append(Polygon(polygonlist))\n\n\ndef whichSurburb(longtitude, latitude):\n point = Point(longtitude, latitude)\n for i in geodict.keys():\n if geodict[i]['type'] == 'polygon':\n if geodict[i]['polygon'].contains(point):\n return i\n else:\n for polygon in geodict[i]['polygons']:\n if polygon.contains(point):\n return i\n return -1\n\n\ndef handle_raw(raw):\n longtitude = raw['coordinates']['coordinates'][0]\n latitude = raw['coordinates']['coordinates'][1]\n return whichSurburb(longtitude, latitude), longtitude, latitude\n", "repo_name": "CaviarChen/CCC_Project_2", "sub_path": "preprocessor/surburbHandler.py", "file_name": "surburbHandler.py", "file_ext": "py", "file_size_in_byte": 1778, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 4, "dataset": "github-code", "pt": "73", "api": [{"api_name": "json.load", "line_number": 10, "usage_type": "call"}, {"api_name": "shapely.geometry.polygon.Polygon", "line_number": 22, "usage_type": "call"}, {"api_name": "shapely.geometry.polygon.Polygon", "line_number": 31, "usage_type": "call"}, {"api_name": "shapely.geometry.Point", "line_number": 35, "usage_type": "call"}]}
+{"seq_id": "2378962610", "text": "from aioextensions import (\n collect,\n run,\n)\nfrom async_lru import (\n alru_cache,\n)\nfrom charts.generators.bar_chart import (\n format_csv_data,\n)\nfrom charts.generators.bar_chart.mttr_benchmarking_cvssf import (\n _get_historic_verification,\n)\nfrom charts.generators.bar_chart.utils import (\n Benchmarking,\n get_valid_subjects,\n get_vulnerability_reattacks,\n GROUP_CATEGORIES,\n ORGANIZATION_CATEGORIES,\n PORTFOLIO_CATEGORIES,\n)\nfrom charts.generators.common.colors import (\n EXPOSURE,\n)\nfrom charts.generators.common.utils import (\n BAR_RATIO_WIDTH,\n get_max_axis,\n)\nfrom charts.generators.stacked_bar_chart.exposed_over_time_cvssf import (\n get_group_document,\n)\nfrom charts.generators.stacked_bar_chart.util_class import (\n RiskOverTime,\n)\nfrom charts.generators.stacked_bar_chart.utils import (\n get_current_time_range,\n)\nfrom charts.utils import (\n format_cvssf,\n get_portfolios_groups,\n iterate_groups,\n iterate_organizations_and_groups,\n json_dump,\n)\nfrom custom_utils.findings import (\n get_group_findings,\n)\nfrom dataloaders import (\n Dataloaders,\n get_new_context,\n)\nfrom db_model.vulnerabilities.types import (\n VulnerabilityVerification,\n)\nfrom decimal import (\n Decimal,\n)\nimport math\nfrom organizations.domain import (\n get_all_active_group_names,\n)\nfrom statistics import (\n mean,\n)\n\n\ndef format_cvssf_log(cvssf: Decimal) -> Decimal:\n if cvssf <= Decimal(\"0.0\"):\n return cvssf.quantize(Decimal(\"0.1\"))\n\n return Decimal(math.log2(cvssf))\n\n\ndef format_max_value(data: tuple[Decimal, ...]) -> Decimal:\n if data:\n return sorted(data, reverse=True)[0]\n\n return Decimal(\"1.0\")\n\n\n@alru_cache(maxsize=None, typed=True)\nasync def get_data_one_group(group: str, loaders: Dataloaders) -> Benchmarking:\n group_findings = await get_group_findings(\n group_name=group, loaders=loaders\n )\n vulnerabilities = await loaders.finding_vulnerabilities.load_many_chained(\n [finding.id for finding in group_findings]\n )\n historics_verification: tuple[\n tuple[VulnerabilityVerification, ...], ...\n ] = await collect(\n tuple(\n _get_historic_verification(loaders, vulnerability)\n for vulnerability in vulnerabilities\n if vulnerability.verification\n ),\n workers=4,\n )\n\n number_of_reattacks = sum(\n get_vulnerability_reattacks(historic_verification=historic)\n for historic in historics_verification\n )\n\n group_document: RiskOverTime = await get_group_document(group, loaders)\n document = get_current_time_range(tuple([group_document]))[0][0]\n values: list[Decimal] = [\n Decimal(document[name][date]).quantize(Decimal(\"0.1\"))\n for date in tuple(document[\"date\"])[-12:]\n for name in document\n if name != \"date\"\n ]\n\n return Benchmarking(\n is_valid=number_of_reattacks > 10,\n subject=group.lower(),\n mttr=values[-1] if len(values) > 0 else Decimal(\"0.0\"),\n number_of_reattacks=number_of_reattacks,\n )\n\n\n@alru_cache(maxsize=None, typed=True)\nasync def get_data_many_groups(\n organization_id: str,\n groups: tuple[str, ...],\n loaders: Dataloaders,\n) -> Benchmarking:\n groups_data: tuple[Benchmarking, ...] 
= await collect(\n tuple(get_data_one_group(group, loaders) for group in groups),\n workers=16,\n )\n\n exposure: Decimal = (\n Decimal(sum(group_data.mttr for group_data in groups_data)).quantize(\n Decimal(\"0.1\")\n )\n if groups_data\n else Decimal(\"0.0\")\n )\n number_of_reattacks = sum(\n group_data.number_of_reattacks for group_data in groups_data\n )\n\n return Benchmarking(\n is_valid=number_of_reattacks > 100,\n subject=organization_id,\n mttr=exposure,\n number_of_reattacks=number_of_reattacks,\n )\n\n\ndef get_average_entities(*, entities: list[Benchmarking]) -> Decimal:\n return (\n Decimal(mean([subject.mttr for subject in entities])).quantize(\n Decimal(\"0.1\")\n )\n if entities\n else Decimal(\"0.0\")\n )\n\n\ndef get_best_exposure(*, subjects: list[Benchmarking]) -> Decimal:\n return (\n Decimal(min(subject.mttr for subject in subjects)).quantize(\n Decimal(\"0.1\")\n )\n if subjects\n else Decimal(\"0.0\")\n )\n\n\ndef get_worst_exposure(*, subjects: list[Benchmarking]) -> Decimal:\n return (\n Decimal(max(subject.mttr for subject in subjects)).quantize(\n Decimal(\"0.1\")\n )\n if subjects\n else Decimal(\"0.0\")\n )\n\n\ndef format_data(\n all_data: tuple[Decimal, Decimal, Decimal, Decimal],\n categories: list[str],\n) -> dict:\n data = tuple(format_cvssf(value) for value in all_data)\n max_value: Decimal = list(\n sorted(\n [abs(value) for value in data],\n reverse=True,\n )\n )[0]\n\n max_axis_value: Decimal = (\n get_max_axis(value=max_value)\n if max_value > Decimal(\"0.0\")\n else Decimal(\"0.0\")\n )\n\n return dict(\n data=dict(\n columns=[\n [\n \"Exposure\",\n *[format_cvssf_log(value) for value in data],\n ]\n ],\n colors={\n \"Exposure\": EXPOSURE,\n },\n labels=True,\n type=\"bar\",\n ),\n axis=dict(\n x=dict(\n categories=categories,\n type=\"category\",\n ),\n y=dict(\n min=0,\n padding=dict(\n bottom=0,\n top=0,\n ),\n label=dict(\n text=\"CVSSF\",\n position=\"inner-top\",\n ),\n tick=dict(\n count=5,\n ),\n **(\n {}\n if max_axis_value == Decimal(\"0.0\")\n else dict(max=format_cvssf_log(max_axis_value))\n ),\n ),\n ),\n bar=dict(\n width=dict(\n ratio=BAR_RATIO_WIDTH,\n ),\n ),\n tooltip=dict(\n show=False,\n ),\n legend=dict(\n show=False,\n ),\n mttrBenchmarking=True,\n maxValue=format_max_value(data),\n maxValueLog=format_max_value(\n tuple(format_cvssf_log(value) for value in data)\n ),\n originalValues=[\n Decimal(value).quantize(Decimal(\"0.1\")) for value in data\n ],\n grid=dict(\n x=dict(\n show=False,\n ),\n y=dict(\n show=True,\n ),\n ),\n hideYAxisLine=True,\n hideXTickLine=True,\n exposureBenchmarkingCvssf=True,\n )\n\n\nasync def generate() -> None: # pylint: disable=too-many-locals\n loaders: Dataloaders = get_new_context()\n organizations: list[tuple[str, tuple[str, ...]]] = []\n portfolios: list[tuple[str, tuple[str, ...]]] = []\n group_names: list[str] = list(\n sorted(\n await get_all_active_group_names(loaders),\n reverse=True,\n )\n )\n\n async for org_id, _, org_groups in iterate_organizations_and_groups():\n organizations.append((org_id, org_groups))\n\n async for org_id, org_name, _ in iterate_organizations_and_groups():\n for portfolio, p_groups in await get_portfolios_groups(org_name):\n portfolios.append(\n (f\"{org_id}PORTFOLIO#{portfolio}\", tuple(p_groups))\n )\n\n all_groups_data: tuple[Benchmarking, ...] = await collect(\n tuple(\n get_data_one_group(\n group_name,\n loaders,\n )\n for group_name in group_names\n ),\n workers=8,\n )\n\n all_organizations_data: tuple[Benchmarking, ...] 
= await collect(\n tuple(\n get_data_many_groups(\n organization_id=organization[0],\n groups=organization[1],\n loaders=loaders,\n )\n for organization in organizations\n ),\n workers=8,\n )\n\n all_portfolios_data: tuple[Benchmarking, ...] = await collect(\n tuple(\n get_data_many_groups(\n organization_id=portfolio[0],\n groups=portfolio[1],\n loaders=loaders,\n )\n for portfolio in portfolios\n ),\n workers=8,\n )\n\n best_exposure: Decimal = get_best_exposure(\n subjects=[\n organization\n for organization in all_organizations_data\n if organization.is_valid\n ]\n )\n\n worst_organazation_exposure: Decimal = get_worst_exposure(\n subjects=[\n organization\n for organization in all_organizations_data\n if organization.is_valid\n ],\n )\n\n best_group_exposure: Decimal = get_best_exposure(\n subjects=[group for group in all_groups_data if group.is_valid]\n )\n\n worst_group_exposure: Decimal = get_worst_exposure(\n subjects=[group for group in all_groups_data if group.is_valid],\n )\n\n best_portfolio_exposure: Decimal = get_best_exposure(\n subjects=[\n portfolio\n for portfolio in all_portfolios_data\n if portfolio.is_valid\n ]\n )\n worst_portfolio_exposure: Decimal = get_worst_exposure(\n subjects=[\n portfolio\n for portfolio in all_portfolios_data\n if portfolio.is_valid\n ],\n )\n\n header: str = \"Categories\"\n alternative: str = \"Exposure\"\n\n async for group in iterate_groups():\n document = format_data(\n all_data=(\n (\n await get_data_one_group(\n group,\n loaders,\n )\n ).mttr,\n best_group_exposure,\n get_average_entities(\n entities=get_valid_subjects(\n all_subjects=all_groups_data,\n )\n ),\n worst_group_exposure,\n ),\n categories=GROUP_CATEGORIES,\n )\n json_dump(\n document=document,\n entity=\"group\",\n subject=group,\n csv_document=format_csv_data(\n document=document, header=header, alternative=alternative\n ),\n )\n\n async for org_id, _, org_groups in iterate_organizations_and_groups():\n document = format_data(\n all_data=(\n (\n await get_data_many_groups(\n organization_id=org_id,\n groups=org_groups,\n loaders=loaders,\n )\n ).mttr,\n best_exposure,\n get_average_entities(\n entities=get_valid_subjects(\n all_subjects=all_organizations_data,\n )\n ),\n worst_organazation_exposure,\n ),\n categories=ORGANIZATION_CATEGORIES,\n )\n json_dump(\n document=document,\n entity=\"organization\",\n subject=org_id,\n csv_document=format_csv_data(\n document=document, header=header, alternative=alternative\n ),\n )\n\n async for org_id, org_name, _ in iterate_organizations_and_groups():\n for portfolio, pgroup_names in await get_portfolios_groups(org_name):\n document = format_data(\n all_data=(\n (\n await get_data_many_groups(\n organization_id=f\"{org_id}PORTFOLIO#{portfolio}\",\n groups=pgroup_names,\n loaders=loaders,\n )\n ).mttr,\n best_portfolio_exposure,\n get_average_entities(\n entities=get_valid_subjects(\n all_subjects=all_portfolios_data,\n )\n ),\n worst_portfolio_exposure,\n ),\n categories=PORTFOLIO_CATEGORIES,\n )\n json_dump(\n document=document,\n entity=\"portfolio\",\n subject=f\"{org_id}PORTFOLIO#{portfolio}\",\n csv_document=format_csv_data(\n document=document,\n header=header,\n alternative=alternative,\n ),\n )\n\n\nif __name__ == \"__main__\":\n run(generate())\n", "repo_name": "cognettings/vulscanner", "sub_path": "integrates/charts/generators/bar_chart/exposure_benchmarking_cvssf.py", "file_name": "exposure_benchmarking_cvssf.py", "file_ext": "py", "file_size_in_byte": 12808, "program_lang": "python", "lang": "en", "doc_type": "code", 
"stars": 1, "dataset": "github-code", "pt": "73", "api": [{"api_name": "decimal.Decimal", "line_number": 67, "usage_type": "name"}, {"api_name": "decimal.Decimal", "line_number": 68, "usage_type": "call"}, {"api_name": "decimal.Decimal", "line_number": 69, "usage_type": "call"}, {"api_name": "decimal.Decimal", "line_number": 71, "usage_type": "call"}, {"api_name": "math.log2", "line_number": 71, "usage_type": "call"}, {"api_name": "decimal.Decimal", "line_number": 74, "usage_type": "name"}, {"api_name": "decimal.Decimal", "line_number": 78, "usage_type": "call"}, {"api_name": "dataloaders.Dataloaders", "line_number": 82, "usage_type": "name"}, {"api_name": "custom_utils.findings.get_group_findings", "line_number": 83, "usage_type": "call"}, {"api_name": "db_model.vulnerabilities.types.VulnerabilityVerification", "line_number": 90, "usage_type": "name"}, {"api_name": "aioextensions.collect", "line_number": 91, "usage_type": "call"}, {"api_name": "charts.generators.bar_chart.mttr_benchmarking_cvssf._get_historic_verification", "line_number": 93, "usage_type": "call"}, {"api_name": "charts.generators.bar_chart.utils.get_vulnerability_reattacks", "line_number": 101, "usage_type": "call"}, {"api_name": "charts.generators.stacked_bar_chart.util_class.RiskOverTime", "line_number": 105, "usage_type": "name"}, {"api_name": "charts.generators.stacked_bar_chart.exposed_over_time_cvssf.get_group_document", "line_number": 105, "usage_type": "call"}, {"api_name": "charts.generators.stacked_bar_chart.utils.get_current_time_range", "line_number": 106, "usage_type": "call"}, {"api_name": "decimal.Decimal", "line_number": 107, "usage_type": "name"}, {"api_name": "decimal.Decimal", "line_number": 108, "usage_type": "call"}, {"api_name": "charts.generators.bar_chart.utils.Benchmarking", "line_number": 114, "usage_type": "call"}, {"api_name": "decimal.Decimal", "line_number": 117, "usage_type": "call"}, {"api_name": "async_lru.alru_cache", "line_number": 81, "usage_type": "call"}, {"api_name": "charts.generators.bar_chart.utils.Benchmarking", "line_number": 82, "usage_type": "name"}, {"api_name": "dataloaders.Dataloaders", "line_number": 126, "usage_type": "name"}, {"api_name": "charts.generators.bar_chart.utils.Benchmarking", "line_number": 128, "usage_type": "name"}, {"api_name": "aioextensions.collect", "line_number": 128, "usage_type": "call"}, {"api_name": "decimal.Decimal", "line_number": 133, "usage_type": "name"}, {"api_name": "decimal.Decimal", "line_number": 134, "usage_type": "call"}, {"api_name": "decimal.Decimal", "line_number": 135, "usage_type": "call"}, {"api_name": "decimal.Decimal", "line_number": 138, "usage_type": "call"}, {"api_name": "charts.generators.bar_chart.utils.Benchmarking", "line_number": 144, "usage_type": "call"}, {"api_name": "async_lru.alru_cache", "line_number": 122, "usage_type": "call"}, {"api_name": "charts.generators.bar_chart.utils.Benchmarking", "line_number": 127, "usage_type": "name"}, {"api_name": "charts.generators.bar_chart.utils.Benchmarking", "line_number": 152, "usage_type": "name"}, {"api_name": "decimal.Decimal", "line_number": 154, "usage_type": "call"}, {"api_name": "statistics.mean", "line_number": 154, "usage_type": "call"}, {"api_name": "decimal.Decimal", "line_number": 155, "usage_type": "call"}, {"api_name": "decimal.Decimal", "line_number": 158, "usage_type": "call"}, {"api_name": "decimal.Decimal", "line_number": 152, "usage_type": "name"}, {"api_name": "charts.generators.bar_chart.utils.Benchmarking", "line_number": 162, "usage_type": "name"}, 
{"api_name": "decimal.Decimal", "line_number": 164, "usage_type": "call"}, {"api_name": "decimal.Decimal", "line_number": 165, "usage_type": "call"}, {"api_name": "decimal.Decimal", "line_number": 168, "usage_type": "call"}, {"api_name": "decimal.Decimal", "line_number": 162, "usage_type": "name"}, {"api_name": "charts.generators.bar_chart.utils.Benchmarking", "line_number": 172, "usage_type": "name"}, {"api_name": "decimal.Decimal", "line_number": 174, "usage_type": "call"}, {"api_name": "decimal.Decimal", "line_number": 175, "usage_type": "call"}, {"api_name": "decimal.Decimal", "line_number": 178, "usage_type": "call"}, {"api_name": "decimal.Decimal", "line_number": 172, "usage_type": "name"}, {"api_name": "decimal.Decimal", "line_number": 183, "usage_type": "name"}, {"api_name": "charts.utils.format_cvssf", "line_number": 186, "usage_type": "call"}, {"api_name": "decimal.Decimal", "line_number": 187, "usage_type": "name"}, {"api_name": "decimal.Decimal", "line_number": 194, "usage_type": "name"}, {"api_name": "decimal.Decimal", "line_number": 196, "usage_type": "call"}, {"api_name": "charts.generators.common.utils.get_max_axis", "line_number": 195, "usage_type": "call"}, {"api_name": "decimal.Decimal", "line_number": 197, "usage_type": "call"}, {"api_name": "charts.generators.common.colors.EXPOSURE", "line_number": 209, "usage_type": "name"}, {"api_name": "decimal.Decimal", "line_number": 234, "usage_type": "call"}, {"api_name": "charts.generators.common.utils.BAR_RATIO_WIDTH", "line_number": 241, "usage_type": "name"}, {"api_name": "decimal.Decimal", "line_number": 256, "usage_type": "call"}, {"api_name": "dataloaders.Dataloaders", "line_number": 273, "usage_type": "name"}, {"api_name": "dataloaders.get_new_context", "line_number": 273, "usage_type": "call"}, {"api_name": "organizations.domain", "line_number": 274, "usage_type": "name"}, {"api_name": "organizations.domain.get_all_active_group_names", "line_number": 278, "usage_type": "call"}, {"api_name": "charts.utils.iterate_organizations_and_groups", "line_number": 283, "usage_type": "call"}, {"api_name": "organizations.domain.append", "line_number": 284, "usage_type": "call"}, {"api_name": "organizations.domain", "line_number": 284, "usage_type": "name"}, {"api_name": "charts.utils.iterate_organizations_and_groups", "line_number": 286, "usage_type": "call"}, {"api_name": "charts.utils.get_portfolios_groups", "line_number": 287, "usage_type": "call"}, {"api_name": "charts.generators.bar_chart.utils.Benchmarking", "line_number": 292, "usage_type": "name"}, {"api_name": "aioextensions.collect", "line_number": 292, "usage_type": "call"}, {"api_name": "charts.generators.bar_chart.utils.Benchmarking", "line_number": 303, "usage_type": "name"}, {"api_name": "aioextensions.collect", "line_number": 303, "usage_type": "call"}, {"api_name": "organizations.domain", "line_number": 310, "usage_type": "name"}, {"api_name": "charts.generators.bar_chart.utils.Benchmarking", "line_number": 315, "usage_type": "name"}, {"api_name": "aioextensions.collect", "line_number": 315, "usage_type": "call"}, {"api_name": "decimal.Decimal", "line_number": 327, "usage_type": "name"}, {"api_name": "decimal.Decimal", "line_number": 335, "usage_type": "name"}, {"api_name": "decimal.Decimal", "line_number": 343, "usage_type": "name"}, {"api_name": "decimal.Decimal", "line_number": 347, "usage_type": "name"}, {"api_name": "decimal.Decimal", "line_number": 351, "usage_type": "name"}, {"api_name": "decimal.Decimal", "line_number": 358, "usage_type": "name"}, 
{"api_name": "charts.utils.iterate_groups", "line_number": 369, "usage_type": "call"}, {"api_name": "charts.generators.bar_chart.utils.get_valid_subjects", "line_number": 380, "usage_type": "call"}, {"api_name": "charts.generators.bar_chart.utils.GROUP_CATEGORIES", "line_number": 386, "usage_type": "name"}, {"api_name": "charts.utils.json_dump", "line_number": 388, "usage_type": "call"}, {"api_name": "charts.generators.bar_chart.format_csv_data", "line_number": 392, "usage_type": "call"}, {"api_name": "charts.utils.iterate_organizations_and_groups", "line_number": 397, "usage_type": "call"}, {"api_name": "charts.generators.bar_chart.utils.get_valid_subjects", "line_number": 409, "usage_type": "call"}, {"api_name": "charts.generators.bar_chart.utils.ORGANIZATION_CATEGORIES", "line_number": 415, "usage_type": "name"}, {"api_name": "charts.utils.json_dump", "line_number": 417, "usage_type": "call"}, {"api_name": "charts.generators.bar_chart.format_csv_data", "line_number": 421, "usage_type": "call"}, {"api_name": "charts.utils.iterate_organizations_and_groups", "line_number": 426, "usage_type": "call"}, {"api_name": "charts.utils.get_portfolios_groups", "line_number": 427, "usage_type": "call"}, {"api_name": "charts.generators.bar_chart.utils.get_valid_subjects", "line_number": 439, "usage_type": "call"}, {"api_name": "charts.generators.bar_chart.utils.PORTFOLIO_CATEGORIES", "line_number": 445, "usage_type": "name"}, {"api_name": "charts.utils.json_dump", "line_number": 447, "usage_type": "call"}, {"api_name": "charts.generators.bar_chart.format_csv_data", "line_number": 451, "usage_type": "call"}, {"api_name": "aioextensions.run", "line_number": 460, "usage_type": "call"}]}
+{"seq_id": "11330446402", "text": "from rest_framework import serializers\nfrom .models import Board, TODO, Reminder\n\n\nclass BoardSerializer(serializers.HyperlinkedModelSerializer):\n todos = serializers.HyperlinkedRelatedField(\n view_name='todo-detail',\n many=True,\n read_only=True,\n )\n uncompleted = serializers.HyperlinkedIdentityField(\n view_name='board-uncompleted'\n )\n\n class Meta:\n model = Board\n fields = ('url', 'name', 'todos', 'uncompleted')\n\n\nclass TODOSerializer(serializers.HyperlinkedModelSerializer):\n\n class Meta:\n model = TODO\n fields = ('url', 'title', 'done', 'board', 'created_at', 'updated_at')\n\n\nclass ReminderSerializer(serializers.HyperlinkedModelSerializer):\n\n class Meta:\n model = Reminder\n fields = ('url', 'post_url', 'text', 'delay')\n\n\nclass ReminderSimpleSerializer(serializers.ModelSerializer):\n\n class Meta:\n model = Reminder\n fields = ('__all__')\n", "repo_name": "POD666/scalors-test-task", "sub_path": "myproject/myproject/todo_app/serializers.py", "file_name": "serializers.py", "file_ext": "py", "file_size_in_byte": 952, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "85", "api": [{"api_name": "rest_framework.serializers.HyperlinkedModelSerializer", "line_number": 5, "usage_type": "attribute"}, {"api_name": "rest_framework.serializers", "line_number": 5, "usage_type": "name"}, {"api_name": "rest_framework.serializers.HyperlinkedRelatedField", "line_number": 6, "usage_type": "call"}, {"api_name": "rest_framework.serializers", "line_number": 6, "usage_type": "name"}, {"api_name": "rest_framework.serializers.HyperlinkedIdentityField", "line_number": 11, "usage_type": "call"}, {"api_name": "rest_framework.serializers", "line_number": 11, "usage_type": "name"}, {"api_name": "models.Board", "line_number": 16, "usage_type": "name"}, {"api_name": "rest_framework.serializers.HyperlinkedModelSerializer", "line_number": 20, "usage_type": "attribute"}, {"api_name": "rest_framework.serializers", "line_number": 20, "usage_type": "name"}, {"api_name": "models.TODO", "line_number": 23, "usage_type": "name"}, {"api_name": "rest_framework.serializers.HyperlinkedModelSerializer", "line_number": 27, "usage_type": "attribute"}, {"api_name": "rest_framework.serializers", "line_number": 27, "usage_type": "name"}, {"api_name": "models.Reminder", "line_number": 30, "usage_type": "name"}, {"api_name": "rest_framework.serializers.ModelSerializer", "line_number": 34, "usage_type": "attribute"}, {"api_name": "rest_framework.serializers", "line_number": 34, "usage_type": "name"}, {"api_name": "models.Reminder", "line_number": 37, "usage_type": "name"}]}
+{"seq_id": "73348154196", "text": "# -*- coding: utf-8 -*-\nfrom re import UNICODE\nimport ssl\nfrom .parsers import AnchorHTMLParser, URLParser\nfrom .sitemap import SiteMapXML\nimport requests\nfrom bs4 import BeautifulSoup\n\nclass WebCrawler(object):\n \"\"\"\n URL of the website\n Maximum recursion depth allowed (defaulted to 3)\n \"\"\"\n\n def __init__(self, url, max_depth=3):\n self.url = url\n self.max_depth = max_depth\n self.website_content = {}\n\n def get_url_info(self):\n \"\"\"\n Extract information from the parsed URL\n \"\"\"\n self.parsed_url = URLParser(self.url)\n self.domain = self.parsed_url.get_domain()\n self.prefix = self.parsed_url.get_prefix()\n self.root_path = self.parsed_url.get_path()\n\n def is_argument_valid(self):\n \"\"\"\n Verify valid URL\n \"\"\"\n parsed_url = URLParser(self.url)\n print('parsed url',parsed_url)\n test_request, error = self.test_http_get_request(self.url)\n if not parsed_url.get_domain() or not test_request:\n print (error)\n return False\n return True\n\n def crawl_it(self):\n \"\"\"\n Set URL metadata\n Initialize crawling execution\n Generate XML\n \"\"\"\n if not self.is_argument_valid():\n raise Exception('%s is not a valid URL' % self.url)\n self.get_url_info()\n urlset =self.perform_crawling([self.root_path], self.max_depth)\n return urlset\n # sitemap_xml = SiteMapXML(self.website_content, self.prefix, self.domain)\n # sitemap_xml.generate()\n\n def perform_crawling(self, urls_set, max_depth):\n \"\"\"\n Navigate through urls (GET info, SET info, search for links, add new links)\n Respect some constraints (visited page, max depth recursion)\n \"\"\"\n # create a set instead of list\n # because we want unique values\n new_urls_set = set()\n # infinte loop protection\n if max_depth:\n # make sure we just hit the url once\n gen = (url for url in urls_set if url not in self.website_content)\n for url in gen:\n # get response from url\n response, lastmod = self.get(url)\n print('----111111----------',response)\n # set url info\n self.set(url, response, lastmod)\n print('----2222----------')\n\n # get all links inside the response\n links_from_response = self.get_links_from_response(response)\n print('----3333----------',links_from_response)\n\n # put new_urls_set and links_from_response together\n new_urls_set = new_urls_set.union(links_from_response)\n print('----4444----------',new_urls_set)\n\n # recursion call (making sure max_depth gets decremented)\n self.perform_crawling(new_urls_set, max_depth-1)\n return new_urls_set\n def get_links_from_response(self, response):\n \"\"\"\n Extract links from the response using a parser\n https://docs.python.org/2/library/htmlparser.html#HTMLParser.HTMLParser.feed\n \"\"\"\n links = set()\n\n soup = BeautifulSoup(response, \"html.parser\")\n\n #Does something with page\n \n print('count link',soup.find_all('a', href=True))\n for link in soup.find_all('a', href=True):\n\n is_valid = self.is_this_link_valid(link['href'])\n print('isvalid',link['href'])\n if is_valid:\n links.add(link)\n return links \n\n # anchor_parser = AnchorHTMLParser()\n # anchor_parser.feed(response)\n # links = set()\n # for link in anchor_parser.handle_starttag():\n # is_valid = self.is_this_link_valid(link)\n # if is_valid:\n # links.add(link)\n # return links\n\n def is_this_link_valid(self, link):\n if not isinstance(link, (str, UNICODE)):\n return False\n if link.startswith('/') or link.startswith(self.domain) or link.startswith('http' + self.domain):\n return True\n # return False\n\n def set(self, current_url, 
response, lastmod):\n \"\"\"\n SET URL information\n \"\"\"\n # print 'Setting URL: ' + current_url\n self.website_content[current_url] = {'response': response, 'lastmod': lastmod}\n\n def get(self, current_url):\n \"\"\"\n Get URL via HTTP\n \"\"\"\n print('Fetching URL: ' + current_url)\n response_raw, lastmod = self.http_get_request(current_url)\n return (response_raw, lastmod)\n\n def http_get_request(self, url):\n \"\"\"\n HTTP Request using urllib\n \"\"\"\n try:\n # Check url contains the domain already\n if not self.domain in url:\n complete_url = \"%s://%s%s\" % (self.prefix, self.domain, url)\n else:\n complete_url = url\n print('complete url',complete_url)\n # This packages the request (it doesn't make it)\n response = requests.get(complete_url)\n # Sends the request and catches the response\n # response = urllib.urlopen(request)\n # print(response.content)\n response_raw = response.content\n try:\n lastmod = response.headers['last-modified'] or response.headers['date']\n except:\n lastmod=None\n print(lastmod)\n except:\n print('Something went wrong for this URL: [%s]' % (url))\n response_raw = str()\n lastmod = None\n\n return (response_raw, lastmod)\n\n def test_http_get_request(self, url):\n \"\"\"\n Test HTTP Request using urllib (given url)\n \"\"\"\n try:\n # This packages the request (it doesn't make it)\n print('test url connection',url)\n response = requests.head(url)\n # Sends the request and catches the response\n except Exception as e:\n return (False, e)\n return (True, None)\n", "repo_name": "wanghaisheng/hongsedianyingzimu", "sub_path": "app/webcrawler.py", "file_name": "webcrawler.py", "file_ext": "py", "file_size_in_byte": 6094, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "85", "api": [{"api_name": "parsers.URLParser", "line_number": 24, "usage_type": "call"}, {"api_name": "parsers.URLParser", "line_number": 33, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 93, "usage_type": "call"}, {"api_name": "re.UNICODE", "line_number": 116, "usage_type": "name"}, {"api_name": "requests.get", "line_number": 149, "usage_type": "call"}, {"api_name": "requests.head", "line_number": 173, "usage_type": "call"}]}
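`is_this_link_valid` and `http_get_request` above rebuild absolute URLs by string concatenation. For comparison, the standard-library equivalent handles relative, root-relative, and absolute links uniformly (example.com is a placeholder):

```python
from urllib.parse import urljoin, urlparse

base = 'https://example.com/docs/'

for link in ('page.html', '/about', 'https://example.com/faq'):
    absolute = urljoin(base, link)              # resolve the link against the base URL
    print(absolute, urlparse(absolute).netloc)  # netloc is what the crawler calls the domain
```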
+{"seq_id": "73675070635", "text": "import os\nimport pandas as pd\nimport torch\nimport librosa\nfrom torch.utils.data import Dataset\n\nclass CommonVoiceDataset(Dataset):\n def __init__(self, data_root, tsv_file, sample_rate=22050, transform=None):\n \"\"\"\n Initialize the Common Voice dataset.\n\n Args:\n data_root (str): Path to the root directory containing the clips folder and TSV files.\n tsv_file (str): Filename of the TSV file containing metadata (e.g., \"train.tsv\", \"test.tsv\", \"dev.tsv\").\n sample_rate (int, optional): Sample rate to use for the audio data.\n transform (callable, optional): Optional transform to apply to the audio data.\n \"\"\"\n self.data_root = data_root\n self.sample_rate = sample_rate\n self.metadata = pd.read_csv(os.path.join(data_root, tsv_file), delimiter=\"\\t\")\n self.transform = transform\n\n def __len__(self):\n return len(self.metadata)\n\n def __getitem__(self, idx):\n if torch.is_tensor(idx):\n idx = idx.tolist()\n\n # Get the path to the audio file and load it\n audio_file = os.path.join(self.data_root, \"clips\", self.metadata.iloc[idx][\"path\"])\n waveform, sample_rate = librosa.load(audio_file, sr=self.sample_rate)\n waveform = torch.from_numpy(waveform).unsqueeze(0)\n \n # Apply the optional transform\n if self.transform:\n waveform = self.transform(waveform)\n\n # Get the corresponding text label\n text = self.metadata.iloc[idx][\"sentence\"]\n\n return waveform, text\n\n\nif __name__ == \"__main__\":\n data_root = \"/path/to/common_voice/data\"\n train_tsv = \"train.tsv\"\n\n train_dataset = CommonVoiceDataset(data_root, train_tsv)\n\n # Access a sample from the dataset\n waveform, text = train_dataset[0]\n print(\"Waveform shape:\", waveform.shape)\n print(\"Text:\", text)\n", "repo_name": "andorxornot/PersonalNeuralCodec", "sub_path": "pnc/datasets/common_voice_dataset.py", "file_name": "common_voice_dataset.py", "file_ext": "py", "file_size_in_byte": 1877, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "73", "api": [{"api_name": "torch.utils.data.Dataset", "line_number": 7, "usage_type": "name"}, {"api_name": "pandas.read_csv", "line_number": 20, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 20, "usage_type": "call"}, {"api_name": "os.path", "line_number": 20, "usage_type": "attribute"}, {"api_name": "torch.is_tensor", "line_number": 27, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 31, "usage_type": "call"}, {"api_name": "os.path", "line_number": 31, "usage_type": "attribute"}, {"api_name": "librosa.load", "line_number": 32, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 33, "usage_type": "call"}]}
+{"seq_id": "16831450502", "text": "import spotipy\n#To access authorised Spotify data\nfrom spotipy.oauth2 import SpotifyClientCredentials\nimport pandas as pd\nimport re\nimport string\nimport config\n\n'''\n Function Name: get_playlist_tracks\n Description:\n Retrieves the list of songs in a playlist\n Input:\n username (string) - username of track creator\n playlist_id (string) - URI of playlist\n Output:\n List of tracks (dictionaries)\n https://developer.spotify.com/documentation/web-api/reference/#category-tracks\n'''\ndef getPlaylistTracks(username,playlist_id):\n results = sp.user_playlist_tracks(username,playlist_id)\n tracks = results['items']\n while results['next']:\n results = sp.next(results)\n tracks.extend(results['items'])\n return tracks\n\ndef parseTrack(track):\n name = track['track']['name']\n url = track['track']['preview_url']\n uri = track['track']['uri']\n return [name, url, uri]\n\n\nif __name__ == \"__main__\":\n regex = re.compile(\".*?\\((.*?)\\)\")\n\n # client_id and client_secret are defined in hidden config.py\n credentials_manager = SpotifyClientCredentials(client_id=config.client_id,\n client_secret=config.client_secret)\n\n # spotify object to access API\n sp = spotipy.Spotify(client_credentials_manager=credentials_manager)\n \n # get the requested playlists\n playlists = pd.read_csv('csv/playlists.csv')\n\n # generate the song information\n songdf = pd.DataFrame(columns=['songName','songURL', 'songURI'])\n for index, row in playlists.iterrows():\n # get the songs in the playlist\n tracks = getPlaylistTracks(row['Creator'],row['URI'])\n print(int(index/len(playlists)*10000)/100, row['Playlist Name'])\n for track in tracks:\n trackInfo = parseTrack(track)\n if (trackInfo):\n songdf.loc[len(songdf)] = trackInfo\n \n print(\"Number of Songs Before Removing Duplicates: \", len(songdf))\n songdf.drop_duplicates(subset=['songURI'], keep='first', inplace = True)\n print(\"Number of Songs After Removing Duplicates: \", len(songdf))\n songdf.to_csv('csv/songlist.csv', index=None)\n", "repo_name": "jason-shoe/dnn-lofi-generation", "sub_path": "data/retrieval/songlist-retrieval.py", "file_name": "songlist-retrieval.py", "file_ext": "py", "file_size_in_byte": 2195, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "85", "api": [{"api_name": "re.compile", "line_number": 36, "usage_type": "call"}, {"api_name": "spotipy.oauth2.SpotifyClientCredentials", "line_number": 39, "usage_type": "call"}, {"api_name": "config.client_id", "line_number": 39, "usage_type": "attribute"}, {"api_name": "config.client_secret", "line_number": 40, "usage_type": "attribute"}, {"api_name": "spotipy.Spotify", "line_number": 43, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 46, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 49, "usage_type": "call"}]}
+{"seq_id": "24638276829", "text": "# !/usr/bin/env python\r\nimport argparse\r\nimport os\r\nimport random\r\nimport shutil\r\nimport time\r\nimport warnings\r\n\r\nimport torch\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\nimport torch.nn.parallel\r\nimport torch.backends.cudnn as cudnn\r\nimport torch.distributed as dist\r\nimport torch.optim\r\nimport torch.utils.data\r\nimport torch.utils.data.distributed\r\nimport torchvision.transforms as transforms\r\nimport torchvision.models as models\r\n\r\nimport numpy as np\r\n\r\nfrom utils.loader import TwoCropsTransform, GaussianBlur\r\n\r\nfrom utils.utils import get_imagenet_root_split, get_cifar10_data_split, get_cifar100_data_split, \\\r\n get_food101_data_split, get_EuroSAT_data_split, remove_state_dict_module, check_and_cvt_pretrain_type, \\\r\n get_iNaturalist_sub1000_data_split, logger\r\nfrom dataset.customDataset import Custom_Dataset\r\nfrom dataset.cifar10 import subsetCIFAR10, subsetCIFAR100\r\n\r\nfrom utils.optimizer import LARS\r\nfrom utils.proj_head import proj_head_simclr\r\n\r\n\r\nfrom functools import partial\r\n\r\nfrom utils.utils import AverageMeter\r\nfrom utils.utils import nt_xent, gather_features\r\n\r\n\r\nmodel_names = sorted(name for name in models.__dict__\r\n if name.islower() and not name.startswith(\"__\")\r\n and callable(models.__dict__[name]))\r\n\r\nparser = argparse.ArgumentParser(description='PyTorch ImageNet Training')\r\nparser.add_argument('experiment', type=str)\r\nparser.add_argument('--save_dir', type=str, default=\"checkpoints_moco\")\r\nparser.add_argument('--data', metavar='DIR', default='',\r\n help='path to dataset')\r\nparser.add_argument('-a', '--arch', metavar='ARCH', default='resnet50',\r\n choices=model_names,\r\n help='model architecture: ' +\r\n ' | '.join(model_names) +\r\n ' (default: resnet50)')\r\nparser.add_argument('-j', '--workers', default=32, type=int, metavar='N',\r\n help='number of data loading workers (default: 32)')\r\nparser.add_argument('--epochs', default=200, type=int, metavar='N',\r\n help='number of total epochs to run')\r\nparser.add_argument('--start-epoch', default=0, type=int, metavar='N',\r\n help='manual epoch number (useful on restarts)')\r\nparser.add_argument('-b', '--batch-size', default=256, type=int,\r\n metavar='N',\r\n help='mini-batch size (default: 256), this is the total '\r\n 'batch size of all GPUs on the current node when '\r\n 'using Data Parallel or Distributed Data Parallel')\r\nparser.add_argument('--lr', '--learning-rate', default=0.03, type=float,\r\n metavar='LR', help='initial learning rate', dest='lr')\r\nparser.add_argument('--schedule', default=[120, 160], nargs='*', type=int,\r\n help='learning rate schedule (when to drop lr by 10x)')\r\nparser.add_argument('--momentum', default=0.9, type=float, metavar='M',\r\n help='momentum of SGD solver')\r\nparser.add_argument('--wd', '--weight-decay', default=1e-4, type=float,\r\n metavar='W', help='weight decay (default: 1e-4)',\r\n dest='weight_decay')\r\nparser.add_argument('-p', '--print-freq', default=50, type=int,\r\n metavar='N', help='print frequency (default: 10)')\r\nparser.add_argument('--resume', action='store_true')\r\nparser.add_argument('--checkpoint_pretrain', default='', type=str,\r\n help='the pretrained contrastive learning model. ')\r\nparser.add_argument('--seed', default=None, type=int,\r\n help='seed for initializing training. 
')\r\nparser.add_argument('--gpu', default=None, type=int,\r\n help='GPU id to use.')\r\nparser.add_argument('--save_freq', default=100, type=int)\r\nparser.add_argument('--optimizer', default=\"sgd\", type=str)\r\n\r\n# parallel training\r\nparser.add_argument('--local_rank', default=-1, type=int,\r\n help='node rank for distributed training')\r\n\r\n\r\n# moco specific configs:\r\nparser.add_argument('--moco-dim', default=128, type=int,\r\n help='feature dimension (default: 128)')\r\nparser.add_argument('--moco-k', default=65536, type=int,\r\n help='queue size; number of negative keys (default: 65536)')\r\nparser.add_argument('--moco-m', default=0.999, type=float,\r\n help='moco momentum of updating key encoder (default: 0.999)')\r\nparser.add_argument('--moco-t', default=0.07, type=float,\r\n help='softmax temperature (default: 0.07)')\r\nparser.add_argument('--color-jitter-strength', default=1.0, type=float,\r\n help='augmentation color jittering strength')\r\n\r\n# options for moco v2\r\nparser.add_argument('--mlp', action='store_true',\r\n help='use mlp head')\r\nparser.add_argument('--aug-plus', action='store_true',\r\n help='use moco v2 data augmentation')\r\nparser.add_argument('--simclr-t', default=0.2, type=float,\r\n help='softmax temperature (default: 0.2)')\r\nparser.add_argument('--cos', action='store_true',\r\n help='use cosine lr schedule')\r\n\r\n# options for simclr\r\nparser.add_argument('--mlpout', default=128, type=int,\r\n help='the output dimension of simclr')\r\n\r\n# options about dataset\r\nparser.add_argument('--dataset', default='ImageNet', type=str,\r\n help='the dataset to employ')\r\nparser.add_argument('--customSplit', default='', type=str,\r\n help='custom split for training')\r\n\r\n# options for low rank network\r\nparser.add_argument('--low_rank', action='store_true', help='if use DnA during pre-training')\r\nparser.add_argument('--low_rank_r_ratio', default=0, type=float, help='the value of small rank r')\r\nparser.add_argument('--low_rank_alpha', default=10.0, type=float, help='the ratio ')\r\nparser.add_argument('--low_rank_fix_sparse', action='store_true', help='if fix s when tunning low rank')\r\nparser.add_argument('--low_rank_fix_low_rank', action='store_true', help='if fix U, V when tunning low rank')\r\nparser.add_argument('--low_rank_tune_V', action='store_true', help='if only tune V')\r\nparser.add_argument('--low_rank_tune_U', action='store_true', help='if only tune U')\r\nparser.add_argument('--low_rank_tune_V_S', action='store_true', help='if only tune V and S')\r\nparser.add_argument('--low_rank_tune_U_S', action='store_true', help='if only tune U and S')\r\nparser.add_argument('--low_rank_tune_all', action='store_true', help='tune U V while fixing mask of S')\r\nparser.add_argument('--low_rank_compress_step', default=1000, type=int, help='the step number for compressing')\r\nparser.add_argument('--low_rank_lambda_s', default=0.01, type=float, help='the value of sparse threshold')\r\nparser.add_argument('--low_rank_sparse_ratio', default=-1, type=float,\r\n help='if sparse ratio is specified, we globally pick the largest r% weights and set it as sparse')\r\nparser.add_argument('--low_rank_UV_lr_ratio', default=1, type=float, help='the lr employed for low rank part '\r\n 'compared to sparse part')\r\nparser.add_argument('--low_rank_only_decompose', action='store_true', help='decompose')\r\nparser.add_argument('--low_rank_keep_noise', action='store_true', help='if keep the noise')\r\nparser.add_argument('--low_rank_reshape_consecutive', 
action='store_true', help='if use reshape consecutive')\r\nparser.add_argument('--low_rank_decompose_no_s', action='store_true', help='if use decompose without s')\r\nparser.add_argument('--low_rank_lora_mode', action='store_true', help='if use lora mode for low rank')\r\n\r\n\r\ndef main():\r\n args = parser.parse_args()\r\n\r\n if args.seed is not None:\r\n random.seed(args.seed)\r\n torch.manual_seed(args.seed)\r\n cudnn.deterministic = True\r\n warnings.warn('You have chosen to seed training. '\r\n 'This will turn on the CUDNN deterministic setting, '\r\n 'which can slow down your training considerably! '\r\n 'You may see unexpected behavior when restarting '\r\n 'from checkpoints.')\r\n\r\n if args.gpu is not None:\r\n warnings.warn('You have chosen a specific GPU. This will completely '\r\n 'disable data parallelism.')\r\n\r\n if args.local_rank == -1:\r\n args.local_rank = int(os.environ['OMPI_COMM_WORLD_LOCAL_RANK'])\r\n\r\n print(\"distributing\")\r\n dist.init_process_group(backend=\"nccl\", init_method=\"env://\")\r\n print(\"paired\")\r\n\r\n torch.cuda.set_device(args.local_rank)\r\n\r\n gloal_rank = torch.distributed.get_rank()\r\n args.gpu = gloal_rank\r\n\r\n world_size = torch.distributed.get_world_size()\r\n print(\"employ {} gpus in total\".format(world_size))\r\n\r\n logName = \"log.txt\"\r\n save_dir = os.path.join(args.save_dir, args.experiment)\r\n if not os.path.exists(save_dir):\r\n os.system(\"mkdir -p {}\".format(save_dir))\r\n log = logger(path=save_dir, local_rank=gloal_rank, log_name=logName)\r\n log.info(str(args))\r\n\r\n main_worker(args.local_rank, gloal_rank, world_size, save_dir, log, args)\r\n\r\n\r\ndef setup_optimizer(optimizer_type, params, lr, momentum, weight_decay, log):\r\n log.info('INFO: Creating Optimizer: [{}] LR: [{:.8f}] Momentum : [{:.8f}] Weight Decay: [{:.8f}]'\r\n .format(optimizer_type, lr, momentum, weight_decay))\r\n\r\n if optimizer_type == 'adam':\r\n optimizer_fun = torch.optim.Adam\r\n optimizer_params = {\"lr\": lr}\r\n elif optimizer_type == 'lars':\r\n optimizer_fun = LARS\r\n optimizer_params = {\"lr\": lr, \"weight_decay\": weight_decay}\r\n elif optimizer_type == 'sgd':\r\n optimizer_fun = torch.optim.SGD\r\n optimizer_params = {\"lr\": lr, \"weight_decay\": weight_decay, \"momentum\": momentum}\r\n else:\r\n raise NotImplementedError(\"no defined optimizer: {}\".format(optimizer_type))\r\n\r\n optimizer = optimizer_fun(params, **optimizer_params)\r\n\r\n return optimizer\r\n\r\n\r\ndef main_worker(local_rank, global_rank, world_size, save_dir, log, args):\r\n\r\n # prepare dataset\r\n train_dataset = init_dataset(args, log)\r\n\r\n train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)\r\n args.batch_size = int(args.batch_size / world_size)\r\n train_loader = torch.utils.data.DataLoader(\r\n train_dataset, batch_size=args.batch_size, shuffle=(train_sampler is None),\r\n num_workers=args.workers, pin_memory=True, sampler=train_sampler, drop_last=True)\r\n\r\n # create model\r\n log.info(\"=> creating model '{}'\".format(args.arch))\r\n\r\n if args.low_rank:\r\n from models.general_framework import resnet_frame\r\n from models.general_framework.convs.low_rank_conv2d import low_rank_conv2d\r\n conv_layer = partial(low_rank_conv2d, lora_alpha=args.low_rank_alpha, r_ratio=args.low_rank_r_ratio,\r\n fix_sparse=args.low_rank_fix_sparse, fix_low_rank=args.low_rank_fix_low_rank,\r\n tune_U=args.low_rank_tune_U, tune_V=args.low_rank_tune_V,\r\n tune_V_S=args.low_rank_tune_V_S, 
tune_U_S=args.low_rank_tune_U_S,\r\n tune_all=args.low_rank_tune_all,\r\n keep_noise=args.low_rank_keep_noise,\r\n reshape_consecutive=args.low_rank_reshape_consecutive,\r\n decompose_no_s=args.low_rank_decompose_no_s, lora_mode=args.low_rank_lora_mode)\r\n model = resnet_frame.__dict__[args.arch](conv_layer=conv_layer)\r\n else:\r\n from models.origin import resnet\r\n model = resnet.__dict__[args.arch]()\r\n\r\n in_dim = model.fc.in_features # 2048\r\n model.fc = proj_head_simclr(in_dim, output_cnt=args.mlpout)\r\n\r\n if args.dataset == \"cifar10\" or args.dataset == \"cifar100\":\r\n log.info(\"remove maxpooling and enlarge conv layer for small resolution\")\r\n model.conv1 = nn.Conv2d(3, model.conv1.out_channels, kernel_size=3, stride=1, padding=1, bias=False)\r\n model.maxpool = nn.Identity()\r\n\r\n process_group = torch.distributed.new_group(list(range(world_size)))\r\n model = nn.SyncBatchNorm.convert_sync_batchnorm(model, process_group)\r\n\r\n # For multiprocessing distributed, DistributedDataParallel constructor\r\n # should always set the single device scope, otherwise,\r\n # DistributedDataParallel will use all available devices.\r\n torch.cuda.set_device(args.local_rank)\r\n model.cuda(args.local_rank)\r\n # When using a single GPU per process and per\r\n # DistributedDataParallel, we need to divide the batch size\r\n # ourselves based on the total number of GPUs we have\r\n\r\n model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank],\r\n output_device=args.local_rank,\r\n find_unused_parameters=False)\r\n\r\n # define loss function (criterion) and optimizer\r\n criterion = nn.CrossEntropyLoss().cuda(args.local_rank)\r\n\r\n if args.checkpoint_pretrain != '':\r\n checkpoint = torch.load(args.checkpoint_pretrain, map_location=\"cpu\")\r\n if 'state_dict' in checkpoint:\r\n state_dict = checkpoint['state_dict']\r\n elif 'P_state' in checkpoint:\r\n state_dict = checkpoint['P_state']\r\n else:\r\n state_dict = checkpoint\r\n\r\n state_dict = remove_state_dict_module(state_dict)\r\n state_dict = check_and_cvt_pretrain_type(state_dict, model.module.state_dict(), log)\r\n\r\n model_dict = model.module.state_dict()\r\n ori_model_keys_num = model_dict.keys().__len__()\r\n\r\n if args.dataset == \"cifar10\" or args.dataset == \"cifar100\":\r\n shape = state_dict['conv1.weight'].shape\r\n if shape[-1] != 3:\r\n out_shape = [3, 3]\r\n state_dict['conv1.weight'] = F.interpolate(state_dict['conv1.weight'], out_shape)\r\n\r\n overlap_state_dict = {k: v for k, v in state_dict.items() if k in model_dict.keys()}\r\n overlap_keys_num = overlap_state_dict.keys().__len__()\r\n\r\n model_dict.update(overlap_state_dict)\r\n\r\n model.module.load_state_dict(model_dict)\r\n\r\n log.info(\"Load SimCLR Pre-trained Model! 
[{}/{}]\"\r\n .format(overlap_keys_num, ori_model_keys_num))\r\n\r\n log.info('read pretrain model {}'.format(args.checkpoint_pretrain))\r\n\r\n if args.low_rank:\r\n from low_rank import prepare_low_rank\r\n params = prepare_low_rank(model, args.low_rank_compress_step, args.low_rank_lambda_s,\r\n args.low_rank_r_ratio, args.checkpoint_pretrain, args.low_rank_keep_noise, log,\r\n args.dataset, args.lr * args.low_rank_UV_lr_ratio, args.low_rank_reshape_consecutive,\r\n args.low_rank_decompose_no_s, args.low_rank_lora_mode, args.low_rank_sparse_ratio)\r\n if args.low_rank_only_decompose:\r\n return\r\n else:\r\n params = model.parameters()\r\n\r\n optimizer = setup_optimizer(args.optimizer, params, args.lr,\r\n momentum=args.momentum,\r\n weight_decay=args.weight_decay, log=log)\r\n\r\n if args.resume:\r\n if os.path.isfile(os.path.join(save_dir, 'checkpoint.pth.tar')):\r\n log.info(\"=> loading checkpoint '{}'\".format(os.path.join(save_dir, 'checkpoint.pth.tar')))\r\n if args.gpu is None:\r\n checkpoint = torch.load(os.path.join(save_dir, 'checkpoint.pth.tar'))\r\n else:\r\n # Map model to be loaded to specified single gpu.\r\n loc = 'cuda:{}'.format(args.local_rank)\r\n checkpoint = torch.load(os.path.join(save_dir, 'checkpoint.pth.tar'), map_location=loc)\r\n args.start_epoch = checkpoint['epoch']\r\n model.load_state_dict(checkpoint['state_dict'])\r\n optimizer.load_state_dict(checkpoint['optimizer'])\r\n\r\n log.info(\"=> loaded checkpoint '{}' (epoch {})\"\r\n .format(args.resume, checkpoint['epoch']))\r\n else:\r\n log.info(\"!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\")\r\n log.info(\"no available checkpoint, start from scratch!!!!!!!!!!!\")\r\n log.info(\"!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\")\r\n\r\n cudnn.benchmark = True\r\n\r\n for epoch in range(args.start_epoch, args.epochs):\r\n train_sampler.set_epoch(epoch)\r\n adjust_learning_rate(optimizer, epoch, args, log)\r\n\r\n train_simclr(train_loader, model, optimizer, epoch, log, args, local_rank, world_size)\r\n\r\n if global_rank == 0:\r\n save_dict = {\r\n 'epoch': epoch + 1,\r\n 'arch': args.arch,\r\n 'state_dict': model.state_dict(),\r\n 'optimizer': optimizer.state_dict(),\r\n }\r\n\r\n save_checkpoint(save_dict, is_best=False,\r\n filename=os.path.join(save_dir, 'checkpoint.pth.tar'.format(epoch + 1)))\r\n\r\n if (epoch + 1) % args.save_freq == 0 or (epoch + 1) == args.epochs:\r\n save_checkpoint(save_dict, is_best=False,\r\n filename=os.path.join(save_dir, 'checkpoint_{}.pth.tar'.format(epoch + 1)))\r\n\r\n # remove checkpoint for resuming after training finished\r\n if (epoch + 1) == args.epochs and args.save_freq > 800:\r\n os.system(\"rm {}\".format(os.path.join(save_dir, 'checkpoint.pth.tar'.format(epoch + 1))))\r\n\r\n\r\ndef init_dataset(args, log):\r\n normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],\r\n std=[0.229, 0.224, 0.225])\r\n\r\n if args.dataset == \"cifar10\" or args.dataset == \"cifar100\":\r\n image_size = 32\r\n else:\r\n image_size = 224\r\n\r\n if args.aug_plus:\r\n # MoCo v2's aug: similar to SimCLR https://arxiv.org/abs/2002.05709\r\n s = args.color_jitter_strength\r\n log.info(\"employed augmentation strength is {}\".format(s))\r\n augmentation = transforms.Compose([\r\n transforms.RandomResizedCrop(image_size, scale=(0.2, 1.)),\r\n transforms.RandomApply([\r\n transforms.ColorJitter(0.4 * s, 0.4 * s, 0.4 * s, 0.1 * s) # not strengthened\r\n ], p=0.8),\r\n transforms.RandomGrayscale(p=0.2),\r\n transforms.RandomApply([GaussianBlur([.1, 2.])], p=0.5),\r\n 
transforms.RandomHorizontalFlip(),\r\n transforms.ToTensor(),\r\n normalize\r\n ])\r\n else:\r\n # MoCo v1's aug: the same as InstDisc https://arxiv.org/abs/1805.01978\r\n assert False\r\n\r\n if args.dataset == \"imagenet\":\r\n # Data loading code\r\n root, txt_train, _, _, pathReplaceDict = get_imagenet_root_split(args.data, args.customSplit)\r\n train_dataset = Custom_Dataset(\r\n root,\r\n txt_train,\r\n TwoCropsTransform(augmentation),\r\n pre_load=False, pathReplace=pathReplaceDict)\r\n elif args.dataset == \"cifar10\" or args.dataset == \"cifar10_large\":\r\n # the data distribution\r\n root, train_idx, _ = get_cifar10_data_split(args.data, args.customSplit, ssl=True)\r\n\r\n train_idx = list(np.load(train_idx))\r\n train_dataset = subsetCIFAR10(root=root, sublist=train_idx, download=True,\r\n transform=TwoCropsTransform(augmentation))\r\n elif args.dataset == \"cifar100\" or args.dataset == \"cifar100_large\":\r\n # the data distribution\r\n root, train_idx, _ = get_cifar100_data_split(args.data, args.customSplit, ssl=True)\r\n\r\n train_idx = list(np.load(train_idx))\r\n train_dataset = subsetCIFAR100(root=root, sublist=train_idx, download=True,\r\n transform=TwoCropsTransform(augmentation))\r\n elif args.dataset == \"food-101\":\r\n # Data loading code\r\n root, txt_train, _, _ = get_food101_data_split(args.data, args.customSplit, ssl=True)\r\n train_dataset = Custom_Dataset(\r\n root,\r\n txt_train,\r\n TwoCropsTransform(augmentation),\r\n pre_load=False)\r\n elif args.dataset == \"EuroSAT\":\r\n # Data loading code\r\n root, txt_train, _, _ = get_EuroSAT_data_split(args.data, args.customSplit, ssl=True)\r\n train_dataset = Custom_Dataset(\r\n root,\r\n txt_train,\r\n TwoCropsTransform(augmentation),\r\n pre_load=False)\r\n elif args.dataset == \"iNaturalist_sub1000\":\r\n # Data loading code\r\n root, txt_train, _, _ = get_iNaturalist_sub1000_data_split(args.data, args.customSplit)\r\n train_dataset = Custom_Dataset(\r\n root,\r\n txt_train,\r\n TwoCropsTransform(augmentation),\r\n pre_load=False)\r\n else:\r\n raise ValueError(\"No such dataset: {}\".format(args.dataset))\r\n\r\n return train_dataset\r\n\r\n\r\ndef save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):\r\n torch.save(state, filename)\r\n if is_best:\r\n shutil.copyfile(filename, 'model_best.pth.tar')\r\n\r\n\r\ndef adjust_learning_rate(optimizer, epoch, args, log):\r\n \"\"\"Decay the learning rate based on schedule\"\"\"\r\n lr = args.lr\r\n if args.cos: # cosine lr schedule\r\n lr = cosine_annealing(epoch, args.epochs, lr, 1e-6, warmup_steps=10)\r\n else: # stepwise lr schedule\r\n for milestone in args.schedule:\r\n lr *= 0.1 if epoch >= milestone else 1.\r\n for param_group in optimizer.param_groups:\r\n param_group['lr'] = lr\r\n\r\n log.info(\"current lr is {}\".format(lr))\r\n\r\n\r\ndef cosine_annealing(step, total_steps, lr_max, lr_min, warmup_steps=0):\r\n assert warmup_steps >= 0\r\n\r\n if step < warmup_steps:\r\n lr = lr_max * step / warmup_steps\r\n else:\r\n lr = lr_min + (lr_max - lr_min) * 0.5 * (\r\n 1 + np.cos((step - warmup_steps) / (total_steps - warmup_steps) * np.pi))\r\n\r\n return lr\r\n\r\n\r\ndef train_simclr(train_loader, model, optimizer, epoch, log, args, local_rank, world_size):\r\n losses = AverageMeter()\r\n losses.reset()\r\n data_time_meter = AverageMeter()\r\n train_time_meter = AverageMeter()\r\n\r\n end = time.time()\r\n\r\n for i, (inputs, _) in enumerate(train_loader):\r\n\r\n data_time = time.time() - end\r\n data_time_meter.update(data_time)\r\n\r\n inputs = 
torch.stack(inputs, dim=1)\r\n d = inputs.size()\r\n # print(\"inputs origin shape is {}\".format(d))\r\n inputs = inputs.view(d[0] * 2, d[2], d[3], d[4]).cuda(non_blocking=True)\r\n\r\n model.train()\r\n\r\n features = model(inputs)\r\n\r\n features = gather_features(features, local_rank, world_size)\r\n\r\n loss = nt_xent(features, t=args.simclr_t)\r\n\r\n # normalize the loss\r\n loss = loss * world_size\r\n\r\n optimizer.zero_grad()\r\n loss.backward()\r\n\r\n optimizer.step()\r\n\r\n losses.update(float(loss.detach().cpu() / world_size), inputs.shape[0])\r\n\r\n train_time = time.time() - end\r\n end = time.time()\r\n train_time_meter.update(train_time)\r\n\r\n # torch.cuda.empty_cache()\r\n if i % args.print_freq == 0 or i == len(train_loader) - 1:\r\n log.info('Epoch: [{0}][{1}/{2}]\\t'\r\n 'Loss {loss.val:.4f} ({loss.avg:.4f})\\t'\r\n 'data_time: {data_time.val:.2f} ({data_time.avg:.2f})\\t'\r\n 'train_time: {train_time.val:.2f} ({train_time.avg:.2f})\\t'.format(\r\n epoch, i, len(train_loader), loss=losses,\r\n data_time=data_time_meter, train_time=train_time_meter))\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n", "repo_name": "VITA-Group/DnA", "sub_path": "train_simclr.py", "file_name": "train_simclr.py", "file_ext": "py", "file_size_in_byte": 23740, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 12, "dataset": "github-code", "pt": "73", "api": [{"api_name": "torchvision.models.__dict__", "line_number": 41, "usage_type": "attribute"}, {"api_name": "torchvision.models", "line_number": 41, "usage_type": "name"}, {"api_name": "torchvision.models.__dict__", "line_number": 43, "usage_type": "attribute"}, {"api_name": "torchvision.models", "line_number": 43, "usage_type": "name"}, {"api_name": "argparse.ArgumentParser", "line_number": 45, "usage_type": "call"}, {"api_name": "random.seed", "line_number": 152, "usage_type": "call"}, {"api_name": "torch.manual_seed", "line_number": 153, "usage_type": "call"}, {"api_name": "torch.backends.cudnn.deterministic", "line_number": 154, "usage_type": "attribute"}, {"api_name": "torch.backends.cudnn", "line_number": 154, "usage_type": "name"}, {"api_name": "warnings.warn", "line_number": 155, "usage_type": "call"}, {"api_name": "warnings.warn", "line_number": 162, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 166, "usage_type": "attribute"}, {"api_name": "torch.distributed.init_process_group", "line_number": 169, "usage_type": "call"}, {"api_name": "torch.distributed", "line_number": 169, "usage_type": "name"}, {"api_name": "torch.cuda.set_device", "line_number": 172, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 172, "usage_type": "attribute"}, {"api_name": "torch.distributed.get_rank", "line_number": 174, "usage_type": "call"}, {"api_name": "torch.distributed", "line_number": 174, "usage_type": "attribute"}, {"api_name": "torch.distributed.get_world_size", "line_number": 177, "usage_type": "call"}, {"api_name": "torch.distributed", "line_number": 177, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 181, "usage_type": "call"}, {"api_name": "os.path", "line_number": 181, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 182, "usage_type": "call"}, {"api_name": "os.path", "line_number": 182, "usage_type": "attribute"}, {"api_name": "os.system", "line_number": 183, "usage_type": "call"}, {"api_name": "utils.utils.logger", "line_number": 184, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 195, "usage_type": 
"attribute"}, {"api_name": "utils.optimizer.LARS", "line_number": 198, "usage_type": "name"}, {"api_name": "torch.optim", "line_number": 201, "usage_type": "attribute"}, {"api_name": "torch.utils.data.distributed.DistributedSampler", "line_number": 216, "usage_type": "call"}, {"api_name": "torch.utils", "line_number": 216, "usage_type": "attribute"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 218, "usage_type": "call"}, {"api_name": "torch.utils", "line_number": 218, "usage_type": "attribute"}, {"api_name": "functools.partial", "line_number": 228, "usage_type": "call"}, {"api_name": "models.general_framework.convs.low_rank_conv2d.low_rank_conv2d", "line_number": 228, "usage_type": "argument"}, {"api_name": "models.general_framework.resnet_frame.__dict__", "line_number": 236, "usage_type": "attribute"}, {"api_name": "models.general_framework.resnet_frame", "line_number": 236, "usage_type": "name"}, {"api_name": "models.origin.resnet.__dict__", "line_number": 239, "usage_type": "attribute"}, {"api_name": "models.origin.resnet", "line_number": 239, "usage_type": "name"}, {"api_name": "utils.proj_head.proj_head_simclr", "line_number": 242, "usage_type": "call"}, {"api_name": "torch.nn.Conv2d", "line_number": 246, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 246, "usage_type": "name"}, {"api_name": "torch.nn.Identity", "line_number": 247, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 247, "usage_type": "name"}, {"api_name": "torch.distributed.new_group", "line_number": 249, "usage_type": "call"}, {"api_name": "torch.distributed", "line_number": 249, "usage_type": "attribute"}, {"api_name": "torch.nn.SyncBatchNorm.convert_sync_batchnorm", "line_number": 250, "usage_type": "call"}, {"api_name": "torch.nn.SyncBatchNorm", "line_number": 250, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 250, "usage_type": "name"}, {"api_name": "torch.cuda.set_device", "line_number": 255, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 255, "usage_type": "attribute"}, {"api_name": "torch.nn.parallel.DistributedDataParallel", "line_number": 261, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 261, "usage_type": "attribute"}, {"api_name": "torch.nn.CrossEntropyLoss", "line_number": 266, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 266, "usage_type": "name"}, {"api_name": "torch.load", "line_number": 269, "usage_type": "call"}, {"api_name": "utils.utils.remove_state_dict_module", "line_number": 277, "usage_type": "call"}, {"api_name": "utils.utils.check_and_cvt_pretrain_type", "line_number": 278, "usage_type": "call"}, {"api_name": "torch.nn.functional.interpolate", "line_number": 287, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 287, "usage_type": "name"}, {"api_name": "low_rank.prepare_low_rank", "line_number": 303, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 317, "usage_type": "call"}, {"api_name": "os.path", "line_number": 317, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 317, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 318, "usage_type": "call"}, {"api_name": "os.path", "line_number": 318, "usage_type": "attribute"}, {"api_name": "torch.load", "line_number": 320, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 320, "usage_type": "call"}, {"api_name": "os.path", "line_number": 320, "usage_type": "attribute"}, {"api_name": "torch.load", "line_number": 324, "usage_type": 
"call"}, {"api_name": "os.path.join", "line_number": 324, "usage_type": "call"}, {"api_name": "os.path", "line_number": 324, "usage_type": "attribute"}, {"api_name": "torch.backends.cudnn.benchmark", "line_number": 336, "usage_type": "attribute"}, {"api_name": "torch.backends.cudnn", "line_number": 336, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 353, "usage_type": "call"}, {"api_name": "os.path", "line_number": 353, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 357, "usage_type": "call"}, {"api_name": "os.path", "line_number": 357, "usage_type": "attribute"}, {"api_name": "os.system", "line_number": 361, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 361, "usage_type": "call"}, {"api_name": "os.path", "line_number": 361, "usage_type": "attribute"}, {"api_name": "torchvision.transforms.Normalize", "line_number": 365, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 365, "usage_type": "name"}, {"api_name": "torchvision.transforms.Compose", "line_number": 377, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 377, "usage_type": "name"}, {"api_name": "torchvision.transforms.RandomResizedCrop", "line_number": 378, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 378, "usage_type": "name"}, {"api_name": "torchvision.transforms.RandomApply", "line_number": 379, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 379, "usage_type": "name"}, {"api_name": "torchvision.transforms.ColorJitter", "line_number": 380, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 380, "usage_type": "name"}, {"api_name": "torchvision.transforms.RandomGrayscale", "line_number": 382, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 382, "usage_type": "name"}, {"api_name": "torchvision.transforms.RandomApply", "line_number": 383, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 383, "usage_type": "name"}, {"api_name": "utils.loader.GaussianBlur", "line_number": 383, "usage_type": "call"}, {"api_name": "torchvision.transforms.RandomHorizontalFlip", "line_number": 384, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 384, "usage_type": "name"}, {"api_name": "torchvision.transforms.ToTensor", "line_number": 385, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 385, "usage_type": "name"}, {"api_name": "utils.utils.get_imagenet_root_split", "line_number": 394, "usage_type": "call"}, {"api_name": "dataset.customDataset.Custom_Dataset", "line_number": 395, "usage_type": "call"}, {"api_name": "utils.loader.TwoCropsTransform", "line_number": 398, "usage_type": "call"}, {"api_name": "utils.utils.get_cifar10_data_split", "line_number": 402, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 404, "usage_type": "call"}, {"api_name": "dataset.cifar10.subsetCIFAR10", "line_number": 405, "usage_type": "call"}, {"api_name": "utils.loader.TwoCropsTransform", "line_number": 406, "usage_type": "call"}, {"api_name": "utils.utils.get_cifar100_data_split", "line_number": 409, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 411, "usage_type": "call"}, {"api_name": "dataset.cifar10.subsetCIFAR100", "line_number": 412, "usage_type": "call"}, {"api_name": "utils.loader.TwoCropsTransform", "line_number": 413, "usage_type": "call"}, {"api_name": "utils.utils.get_food101_data_split", "line_number": 416, 
"usage_type": "call"}, {"api_name": "dataset.customDataset.Custom_Dataset", "line_number": 417, "usage_type": "call"}, {"api_name": "utils.loader.TwoCropsTransform", "line_number": 420, "usage_type": "call"}, {"api_name": "utils.utils.get_EuroSAT_data_split", "line_number": 424, "usage_type": "call"}, {"api_name": "dataset.customDataset.Custom_Dataset", "line_number": 425, "usage_type": "call"}, {"api_name": "utils.loader.TwoCropsTransform", "line_number": 428, "usage_type": "call"}, {"api_name": "utils.utils.get_iNaturalist_sub1000_data_split", "line_number": 432, "usage_type": "call"}, {"api_name": "dataset.customDataset.Custom_Dataset", "line_number": 433, "usage_type": "call"}, {"api_name": "utils.loader.TwoCropsTransform", "line_number": 436, "usage_type": "call"}, {"api_name": "torch.save", "line_number": 445, "usage_type": "call"}, {"api_name": "shutil.copyfile", "line_number": 447, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 471, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 471, "usage_type": "attribute"}, {"api_name": "utils.utils.AverageMeter", "line_number": 477, "usage_type": "call"}, {"api_name": "utils.utils.AverageMeter", "line_number": 479, "usage_type": "call"}, {"api_name": "utils.utils.AverageMeter", "line_number": 480, "usage_type": "call"}, {"api_name": "time.time", "line_number": 482, "usage_type": "call"}, {"api_name": "time.time", "line_number": 486, "usage_type": "call"}, {"api_name": "torch.stack", "line_number": 489, "usage_type": "call"}, {"api_name": "utils.utils.gather_features", "line_number": 498, "usage_type": "call"}, {"api_name": "utils.utils.nt_xent", "line_number": 500, "usage_type": "call"}, {"api_name": "time.time", "line_number": 512, "usage_type": "call"}, {"api_name": "time.time", "line_number": 513, "usage_type": "call"}]}
+{"seq_id": "26857868747", "text": "from pathlib import Path\nfrom unittest.mock import Mock\n\nfrom ton_engine.analysis.sql_analysis import SQLAnalysis\n\n\ndef test_run():\n datasource_mock = Mock()\n datasource_mock.run_query = Mock(return_value=['1', '2'])\n\n sql_analysis = SQLAnalysis(\n slug='abc',\n datasource=datasource_mock,\n sql_file_path=Path(__file__).parent / Path('data/sample.sql'),\n )\n\n result = sql_analysis.run()\n\n datasource_mock.run_query.assert_called_with(query=\"SELECT *\\nFROM Students;\")\n\n assert result == ['1', '2']\n", "repo_name": "monireh-yousefi/ton-engine", "sub_path": "test/ton_engine/analysis/test_sql_analysis.py", "file_name": "test_sql_analysis.py", "file_ext": "py", "file_size_in_byte": 539, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "85", "api": [{"api_name": "unittest.mock.Mock", "line_number": 8, "usage_type": "call"}, {"api_name": "unittest.mock.Mock", "line_number": 9, "usage_type": "call"}, {"api_name": "ton_engine.analysis.sql_analysis.SQLAnalysis", "line_number": 11, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 14, "usage_type": "call"}]}
+{"seq_id": "26766185569", "text": "import discord\nimport os\n#from keep_alive import keep_alive\nfrom markov_update import createMarkovJSONFull\nfrom markov_update import updateMarkovJSONFull\nfrom markov_update import createMarkovJSONUser\nfrom markov_update import updateMarkovJSONUser\nfrom markov_chain import getMarkovJSONDict\nfrom markov_chain import createMarkovChain\n\ntoken = open(\"token.txt\", \"r\").read()\n\nclient = discord.Client()\n\ndef getUser(message, userstring):\n user = \"\"\n for foruser in message.guild.members:\n if str(userstring).lower() == str(foruser).lower():\n user = foruser\n if user == \"\":\n for foruser in message.guild.members:\n if str(userstring).lower() in str(foruser.display_name).lower():\n user = foruser\n if user == \"\":\n user = message.author\n return user\n\ndef sendHelpMessage(message):\n embed=discord.Embed(title=\"markov-bot\", url=\"https://github.com/whambulance/markov-bot\", description=\"Talks based on how others type. He's like you, but better! \\nType !markov to create generate a message for you \\n \", color=0x4bb4f1)\n embed.set_thumbnail(url=\"https://upload.wikimedia.org/wikipedia/commons/7/70/AAMarkov.jpg\")\n embed.add_field(name=\"!mv, !markov [USER]... [OPTION]...\",value=\"Basic syntax\", inline=True)\n embed.add_field(name=\"-s --startswith [STR]\", value=\"Chain starting word\", inline=False)\n embed.add_field(name=\"-l, --length [INT]\", value=\"Chain length (def: rand 1 - 14) (max: 100)\", inline=True)\n embed.add_field(name=\"!mkjson\",value=\"Update your Markov JSON Dictionary\", inline=True)\n return embed\n\n@client.event\nasync def on_message(message):\n\n if message.author.bot:\n exit\n\n elif (\"help\" in str(message.content).lower() or \"aid\" in str(message.content).lower() or \"hand\" in str(message.content).lower() or \"assist\" in str(message.content).lower()) and (\"markov\" in str(message.content).lower() or \"mk\" in str(message.content).lower()):\n embed = sendHelpMessage(message)\n await message.channel.send(embed=embed)\n elif message.content.startswith(\"!mk \") or message.content.startswith(\"!markov \") or message.content == \"!mk\" or message.content == \"!markov\":\n user = \"\"\n startswith = \"\"\n length = 0\n\n splitMessage = message.content.split()\n for index, i in enumerate(splitMessage):\n if i == \"-s\" or i == \"--startswith\":\n startswith = splitMessage[index+1]\n elif index == 1 and \"-\" not in i:\n user = i\n elif i == \"-l\" or i == \"--length\":\n length = int(splitMessage[index+1])\n \n if user != \"\":\n messageUser = getUser(message, user)\n else:\n messageUser = message.author\n if length > 100:\n length = 100\n\n print (\"$:makeMarkov -u \" + messageUser.display_name + \" -s \" + startswith + \" -l \" + str(length))\n\n userJSONDict = getMarkovJSONDict(message, messageUser)\n markovChain = createMarkovChain(userJSONDict, startswith, length)\n\n if markovChain != \"\":\n print (\"Printed: \" + str(markovChain))\n print (\"\")\n if messageUser.display_name[0].islower():\n newnick = messageUser.display_name[0:20] + \" markov\"\n elif messageUser.display_name.isupper():\n newnick = messageUser.display_name[0:20] + \" MARKOV\"\n else:\n newnick = messageUser.display_name[0:20] + \" Markov\"\n me = message.guild.me\n await discord.Member.edit(me, nick=newnick)\n await message.channel.send(markovChain)\n await discord.Member.edit(me, nick=\"\")\n \n elif (\"!mkjson\" in str(message.content).lower()):\n splitMessage = message.content.split()\n print (\"$:\" + message.content[1:999] + 
\" by \" + message.author.display_name)\n for index, i in enumerate(splitMessage):\n msgCount = None\n msgUser = \"\"\n if str(message.author.id) == \"120242398176477186\":\n if i == \"createchannel\":\n print(\"$:\")\n msgCount = None\n await createMarkovJSONFull(message, msgCount)\n return\n elif i == \"createuser\":\n print(\"$:\")\n msgUser = getUser(message, splitMessage[index+1])\n await createMarkovJSONUser(message, msgUser)\n return\n #elif i == \"updatechannel\":\n #print(\"$:\")\n #msgCount = None\n #await updateMarkovJSONFull(message, msgCount)\n #return\n elif i == \"updateuser\":\n print(\"$:\")\n msgUser = getUser(message, splitMessage[index+1])\n await updateMarkovJSONUser(message, msgUser)\n return \n await updateMarkovJSONUser(message, message.author)\n\n elif (\"!mktest\" in str(message.content).lower()):\n splitMessage = message.content.split()\n markovJSONDict = getMarkovJSONDict(message, message.author)\n createMarkovChain(markovJSONDict, \"\", 0)\n\n#keep_alive()\nclient.run(token)", "repo_name": "whambulance/markov-bot", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 5353, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "85", "api": [{"api_name": "discord.Client", "line_number": 13, "usage_type": "call"}, {"api_name": "discord.Embed", "line_number": 29, "usage_type": "call"}, {"api_name": "markov_chain.getMarkovJSONDict", "line_number": 69, "usage_type": "call"}, {"api_name": "markov_chain.createMarkovChain", "line_number": 70, "usage_type": "call"}, {"api_name": "discord.Member.edit", "line_number": 82, "usage_type": "call"}, {"api_name": "discord.Member", "line_number": 82, "usage_type": "attribute"}, {"api_name": "discord.Member.edit", "line_number": 84, "usage_type": "call"}, {"api_name": "discord.Member", "line_number": 84, "usage_type": "attribute"}, {"api_name": "markov_update.createMarkovJSONFull", "line_number": 96, "usage_type": "call"}, {"api_name": "markov_update.createMarkovJSONUser", "line_number": 101, "usage_type": "call"}, {"api_name": "markov_update.updateMarkovJSONUser", "line_number": 111, "usage_type": "call"}, {"api_name": "markov_update.updateMarkovJSONUser", "line_number": 113, "usage_type": "call"}, {"api_name": "markov_chain.getMarkovJSONDict", "line_number": 117, "usage_type": "call"}, {"api_name": "markov_chain.createMarkovChain", "line_number": 118, "usage_type": "call"}]}
+{"seq_id": "14643808079", "text": "import numpy as np\nfrom PIL import ImageGrab\nfrom cv2 import cv2\nimport time\nfrom directkeys import PressKey, ReleaseKey, Z, Q, S, D\nfrom grabscreen import grab_screen\nfrom draw_lanes import draw_lanes\n\ndef draw_lines(image, lines):\n for line in lines:\n coords = line[0]\n cv2.line(image, (coords[0], coords[1]), (coords[2], coords[3]), [255,255,255], 5)\n\n\ndef roi(image, vertices):\n # Blank mask\n mask = np.zeros_like(image)\n \n # Fill the pixels inside the polygon defined by \"vertices\" with the fill color\n cv2.fillPoly(mask, vertices, 255)\n\n # Extract pixels from image only where mask pixels are nonzero\n masked = cv2.bitwise_and(image, mask)\n\n\n return masked \n\ndef process_img(image):\n original_image = image\n # Convert to gray\n processed_img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n # Edge detection\n processed_img = cv2.Canny(processed_img, threshold1=120, threshold2=160)\n processed_img = cv2.GaussianBlur(processed_img, (5,5),0)\n #vertices = np.array([[10, 400], [10, 300], [250, 100], [450, 100], [640, 300], [640, 400]], np.int32)\n vertices = np.array([[10, 350], [10, 300], [320, 210], [325, 210], [640, 300], [640, 350]])\n processed_img = roi(processed_img, [vertices])\n\n # edges\n lines = cv2.HoughLinesP(processed_img, 1, np.pi/180, 180, np.array([]), 20, 10)\n m1 = 0\n m2 = 0\n try:\n l1, l2, m1,m2 = draw_lanes(original_image,lines)\n cv2.line(original_image, (l1[0], l1[1]), (l1[2], l1[3]), [0,255,0], 5)\n cv2.line(original_image, (l2[0], l2[1]), (l2[2], l2[3]), [0,255,0], 5)\n except Exception as e:\n print(str(e))\n pass\n try:\n for coords in lines:\n coords = coords[0]\n try:\n cv2.line(processed_img, (coords[0], coords[1]), (coords[2], coords[3]), [255,0,0], 5)\n \n \n except Exception as e:\n print(str(e))\n except Exception as e:\n pass\n\n return processed_img, original_image, m1, m2\n\ndef straight():\n ReleaseKey(Q)\n ReleaseKey(D)\n PressKey(Z)\n\ndef left():\n ReleaseKey(Z)\n ReleaseKey(D)\n PressKey(Q)\n \ndef right():\n ReleaseKey(Z)\n ReleaseKey(Q)\n PressKey(D)\n \ndef slow_down():\n ReleaseKey(Z)\n ReleaseKey(Q)\n ReleaseKey(D)\n\nfor i in list(range(4))[::-1]:\n print(i+1)\n time.sleep(1)\n\n'''PressKey(Z)\ntime.sleep(3)\nReleaseKey(Z)'''\n\ndef main():\n last_time = time.time()\n while(True):\n screen = grab_screen(region=(0,40,640,480))\n new_screen, original_image, m1, m2 = process_img(screen)\n print('Loop took {} seconds'.format(time.time() - last_time))\n last_time = time.time()\n # Showing output\n cv2.imshow('window', new_screen)\n cv2.imshow('window2', cv2.cvtColor(original_image, cv2.COLOR_BGR2RGB))\n\n '''if m1 < 0 and m2 < 0: \n right()\n elif m1 > 0 and m2 > 0:\n left()\n else:\n straight()'''\n\n # Quitting opencv\n if cv2.waitKey(25) & 0xFF == ord('q'):\n # Destroy\n cv2.destroyAllWindows()\n break\n\nif __name__ == \"__main__\":\n main()", "repo_name": "mav3rick177/gtasa_ai_driver", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 3205, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "85", "api": [{"api_name": "cv2.cv2.line", "line_number": 12, "usage_type": "call"}, {"api_name": "cv2.cv2", "line_number": 12, "usage_type": "name"}, {"api_name": "numpy.zeros_like", "line_number": 17, "usage_type": "call"}, {"api_name": "cv2.cv2.fillPoly", "line_number": 20, "usage_type": "call"}, {"api_name": "cv2.cv2", "line_number": 20, "usage_type": "name"}, {"api_name": "cv2.cv2.bitwise_and", "line_number": 
23, "usage_type": "call"}, {"api_name": "cv2.cv2", "line_number": 23, "usage_type": "name"}, {"api_name": "cv2.cv2.cvtColor", "line_number": 31, "usage_type": "call"}, {"api_name": "cv2.cv2", "line_number": 31, "usage_type": "name"}, {"api_name": "cv2.cv2.COLOR_BGR2GRAY", "line_number": 31, "usage_type": "attribute"}, {"api_name": "cv2.cv2.Canny", "line_number": 33, "usage_type": "call"}, {"api_name": "cv2.cv2", "line_number": 33, "usage_type": "name"}, {"api_name": "cv2.cv2.GaussianBlur", "line_number": 34, "usage_type": "call"}, {"api_name": "cv2.cv2", "line_number": 34, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 36, "usage_type": "call"}, {"api_name": "cv2.cv2.HoughLinesP", "line_number": 40, "usage_type": "call"}, {"api_name": "cv2.cv2", "line_number": 40, "usage_type": "name"}, {"api_name": "numpy.pi", "line_number": 40, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 40, "usage_type": "call"}, {"api_name": "draw_lanes.draw_lanes", "line_number": 44, "usage_type": "call"}, {"api_name": "cv2.cv2.line", "line_number": 45, "usage_type": "call"}, {"api_name": "cv2.cv2", "line_number": 45, "usage_type": "name"}, {"api_name": "cv2.cv2.line", "line_number": 46, "usage_type": "call"}, {"api_name": "cv2.cv2", "line_number": 46, "usage_type": "name"}, {"api_name": "cv2.cv2.line", "line_number": 54, "usage_type": "call"}, {"api_name": "cv2.cv2", "line_number": 54, "usage_type": "name"}, {"api_name": "directkeys.ReleaseKey", "line_number": 65, "usage_type": "call"}, {"api_name": "directkeys.Q", "line_number": 65, "usage_type": "argument"}, {"api_name": "directkeys.ReleaseKey", "line_number": 66, "usage_type": "call"}, {"api_name": "directkeys.D", "line_number": 66, "usage_type": "argument"}, {"api_name": "directkeys.PressKey", "line_number": 67, "usage_type": "call"}, {"api_name": "directkeys.Z", "line_number": 67, "usage_type": "argument"}, {"api_name": "directkeys.ReleaseKey", "line_number": 70, "usage_type": "call"}, {"api_name": "directkeys.Z", "line_number": 70, "usage_type": "argument"}, {"api_name": "directkeys.ReleaseKey", "line_number": 71, "usage_type": "call"}, {"api_name": "directkeys.D", "line_number": 71, "usage_type": "argument"}, {"api_name": "directkeys.PressKey", "line_number": 72, "usage_type": "call"}, {"api_name": "directkeys.Q", "line_number": 72, "usage_type": "argument"}, {"api_name": "directkeys.ReleaseKey", "line_number": 75, "usage_type": "call"}, {"api_name": "directkeys.Z", "line_number": 75, "usage_type": "argument"}, {"api_name": "directkeys.ReleaseKey", "line_number": 76, "usage_type": "call"}, {"api_name": "directkeys.Q", "line_number": 76, "usage_type": "argument"}, {"api_name": "directkeys.PressKey", "line_number": 77, "usage_type": "call"}, {"api_name": "directkeys.D", "line_number": 77, "usage_type": "argument"}, {"api_name": "directkeys.ReleaseKey", "line_number": 80, "usage_type": "call"}, {"api_name": "directkeys.Z", "line_number": 80, "usage_type": "argument"}, {"api_name": "directkeys.ReleaseKey", "line_number": 81, "usage_type": "call"}, {"api_name": "directkeys.Q", "line_number": 81, "usage_type": "argument"}, {"api_name": "directkeys.ReleaseKey", "line_number": 82, "usage_type": "call"}, {"api_name": "directkeys.D", "line_number": 82, "usage_type": "argument"}, {"api_name": "time.sleep", "line_number": 86, "usage_type": "call"}, {"api_name": "time.time", "line_number": 93, "usage_type": "call"}, {"api_name": "grabscreen.grab_screen", "line_number": 95, "usage_type": "call"}, {"api_name": "time.time", 
"line_number": 97, "usage_type": "call"}, {"api_name": "time.time", "line_number": 98, "usage_type": "call"}, {"api_name": "cv2.cv2.imshow", "line_number": 100, "usage_type": "call"}, {"api_name": "cv2.cv2", "line_number": 100, "usage_type": "name"}, {"api_name": "cv2.cv2.imshow", "line_number": 101, "usage_type": "call"}, {"api_name": "cv2.cv2", "line_number": 101, "usage_type": "name"}, {"api_name": "cv2.cv2.cvtColor", "line_number": 101, "usage_type": "call"}, {"api_name": "cv2.cv2.COLOR_BGR2RGB", "line_number": 101, "usage_type": "attribute"}, {"api_name": "cv2.cv2.waitKey", "line_number": 111, "usage_type": "call"}, {"api_name": "cv2.cv2", "line_number": 111, "usage_type": "name"}, {"api_name": "cv2.cv2.destroyAllWindows", "line_number": 113, "usage_type": "call"}, {"api_name": "cv2.cv2", "line_number": 113, "usage_type": "name"}]}
+{"seq_id": "71995610517", "text": "#\n# Protocol implementation of the 433 MHz OOK protocol known as\n# - KaKu\n# - self-learning Nexa (Arctech?) remote\n#\n# 1. Application protocol: Each message sent over the radio is a 32\n# bit word, sent MSB first. The bit definitions are, from MSB to LSB:\n#\n# 26 bits: group address (unique for each remote control)\n# 1 bit: group flag (for addressing all switches of this group)\n# 1 bit: action on/off flag\n# 4 bits: switch id within this group (button number on remote)\n# (4 bits optional: dimmer level)\n#\n# An example from my Nexa remote, decoded, same order as on oscilloscope:\n#\n# 0 0 0 1 1 0 0 0 0 1 1 1 1 1 1 0 0 0 0 0 1 0 0 0 1 0 0 1 0 0 1 0\n# 0x 1 8 7 e 0 8 9 2\n# \\_________________________________________________/ \\/\\/\\_____/\n# group address gr on switch\n#\n# 2. Manchester encoding: The bits are manchester encoded when sent\n# over the air, so one application bit is sent as two bits (symbols)\n# over the air. An application layer 0 bit is sent on the radio as a\n# 0 symbol and a 1 symbol. A 1 is sent as 1-0. (Or the other way\n# around, depending on how you define the radio layer symbols).\n#\n# Dimming: An absolute dimming level can be appended to the message.\n# The on/off bit then takes on a third value is then sent as 00 in\n# the manchester coded representation.\n#\n# The message begins with a START symbol and ends with a STOP symbol.\n#\n# 3. Symbols on the radio layer: The radio uses on off keying (OOK),\n# where the carrier is sent for a high level and no carrier is sent\n# for a low level. The symbols are defined thus:\n#\n# |----|\n# START | |\n# | |_________________________________________________\n# 1T 10T\n#\n# |----|\n# 0 | |\n# | |_____\n# 1T 1T\n#\n# |----|\n# 1 | |\n# | |_________________________\n# 1T 5T\n#\n# |----|\n# STOP | |\n# | |____________________\n# 1T 4T\n#\n# The time unit T is about 260us. 
A 0 takes 0.52ms and 1 takes 1.56ms\n# to send, so an application layer message takes about 71ms to send\n# in its entirety.\n#\n# The first bits (00011000) of the example message look like this on\n# the modulation input of the radio transmitter (I represents\n# carrier, _ represents no carrier):\n# I__________I_I_____I_I_____I_I_____I_____I_I_____I_I_I_____I_I_____I_I_____...\n#\n# start 0 1 0 1 0 1 1 0 1 0 0 1 0 1 0 1 ...\n#\n\nimport logging\n\nclass KakuProtocol:\n BIT_TIME = 260e-6\n\n @staticmethod\n def encode_bytestream(bits):\n return bytearray(0)\n \n @staticmethod\n def bits(d, width):\n out = \"{0:0{width}b}\".format(d, width=width)\n assert(len(out) == width)\n return list(map(int, out))\n\n @staticmethod\n def bitlist_to_bytearray(bits):\n \"\"\"Return a left-aligned byte representation of the bits, zero-padded\"\"\"\n out = bytearray()\n n = 0\n for i, b in enumerate(bits):\n n = (n << 1) | b\n if i % 8 == 7:\n out.append(n)\n n = 0\n i += 1\n if i % 8:\n out.append(n << (8 - (i % 8)))\n return out\n\n @staticmethod\n def symbol(bit):\n return {\n 0: [ 1, 0, 1, 0, 0, 0, 0, 0 ],\n 1: [ 1, 0, 0, 0, 0, 0, 1, 0 ],\n \"start\": [ 1 ] + [ 0 ] * 10,\n \"stop\": [ 1 ] + [ 0 ] * 10\n }[bit]\n\n @classmethod\n def encode_message(cls, group_address, group_flag, on, switch_id, dim=None):\n log = logging.getLogger(\"proto\")\n log.debug(\"Encoding group=%06x switch=%d on=%d\" % (group_address, switch_id, int(on)))\n bits = cls.symbol(\"start\")\n for b in cls.bits(group_address, 26):\n bits += cls.symbol(b)\n bits += cls.symbol(int(group_flag == True))\n bits += cls.symbol(int(on == True))\n for b in cls.bits(switch_id, 4):\n bits += cls.symbol(b)\n bits += cls.symbol(\"stop\")\n return cls.bitlist_to_bytearray(bits)\n\nif __name__ == \"__main__\":\n tx_data = KakuProtocol.encode_message(0xaaaaaa, False, True, 1)\n print(tx_data.hex())\n", "repo_name": "JonasNorling/mqtt_rfm69_ook_switch", "sub_path": "protocol_kaku.py", "file_name": "protocol_kaku.py", "file_ext": "py", "file_size_in_byte": 4238, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "85", "api": [{"api_name": "logging.getLogger", "line_number": 111, "usage_type": "call"}]}
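+The timing diagrams in the comment above can be sanity-checked against the symbol tables by rendering the bit lists the same way the comment does, one character per time unit T (~260us). A small sketch, assuming the KakuProtocol class from this file is in scope:
+```python
+def ook_trace(bits):
+    # I = carrier on for one time unit T, _ = carrier off, matching the
+    # illustration style used in the protocol comment.
+    return "".join("I" if b else "_" for b in bits)
+
+bits = KakuProtocol.symbol("start")
+for b in KakuProtocol.bits(0b0001, 4):
+    bits += KakuProtocol.symbol(b)
+print(ook_trace(bits))
+```
+For the start symbol followed by application bits 0001 this prints a trace in exactly the style of the modulation example in the comment, which makes it easy to eyeball the manchester pairs.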
+{"seq_id": "973187727", "text": "import os\nimport sys\nimport openai\nimport pysrt\n\n\nopenai.api_key = os.getenv(\"OPENAI_API_KEY\")\ntext_input = sys.stdin.read()\nsrt = pysrt.from_string(text_input)\n\nbase_prompt = (\n \"You are going to be an English to Korean translator.\"\n \"I will give you a transcript from a video talking about the difference\"\n \"different levels of software engineers.\"\n \"Please translate the following into polite Korean starting from\"\n \"[START] until [END]:\\n[START]\\n\"\n)\n\n\ndef translate(text):\n prompt = base_prompt + text + \"\\n[END]\"\n#\n res = openai.Completion.create(\n model=\"text-davinci-003\",\n prompt=prompt,\n max_tokens=3000,\n temperature=0\n )\n\n raw_translation = res.choices[0].text\n stripped_start = raw_translation.replace('[START]', '').strip()\n translation = stripped_start.replace('[END]', '').strip()\n return translation\n\n\nfor index, subtitle in enumerate(srt):\n subtitle.text = translate(subtitle.text)\n print(subtitle, flush=True)\n", "repo_name": "jerhage/gpt-vid-translator", "sub_path": "src/lib/server/scripts/translate.py", "file_name": "translate.py", "file_ext": "py", "file_size_in_byte": 999, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "85", "api": [{"api_name": "openai.api_key", "line_number": 7, "usage_type": "attribute"}, {"api_name": "os.getenv", "line_number": 7, "usage_type": "call"}, {"api_name": "sys.stdin.read", "line_number": 8, "usage_type": "call"}, {"api_name": "sys.stdin", "line_number": 8, "usage_type": "attribute"}, {"api_name": "pysrt.from_string", "line_number": 9, "usage_type": "call"}, {"api_name": "openai.Completion.create", "line_number": 23, "usage_type": "call"}, {"api_name": "openai.Completion", "line_number": 23, "usage_type": "attribute"}]}
+{"seq_id": "28706081664", "text": "import inspect\nimport unittest\n\nfrom transformers import is_torch_available, is_vision_available\nfrom transformers.models.auto import get_values\nfrom transformers.testing_utils import require_torch, slow, torch_device\n\nfrom ...test_configuration_common import ConfigTester\nfrom ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor\nfrom ...test_pipeline_mixin import PipelineTesterMixin\n\n\nif is_torch_available():\n import torch\n\n from transformers import MODEL_MAPPING, PoolFormerConfig, PoolFormerForImageClassification, PoolFormerModel\n from transformers.models.poolformer.modeling_poolformer import POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST\n\n\nif is_vision_available():\n from PIL import Image\n\n from transformers import PoolFormerImageProcessor\n\n\nclass PoolFormerConfigTester(ConfigTester):\n def create_and_test_config_common_properties(self):\n config = self.config_class(**self.inputs_dict)\n self.parent.assertTrue(hasattr(config, \"hidden_sizes\"))\n self.parent.assertTrue(hasattr(config, \"num_encoder_blocks\"))\n\n\nclass PoolFormerModelTester:\n def __init__(\n self,\n parent,\n batch_size=13,\n image_size=64,\n num_channels=3,\n num_encoder_blocks=4,\n depths=[2, 2, 2, 2],\n sr_ratios=[8, 4, 2, 1],\n hidden_sizes=[16, 32, 64, 128],\n downsampling_rates=[1, 4, 8, 16],\n is_training=False,\n use_labels=True,\n hidden_act=\"gelu\",\n hidden_dropout_prob=0.1,\n initializer_range=0.02,\n num_labels=3,\n scope=None,\n ):\n self.parent = parent\n self.batch_size = batch_size\n self.image_size = image_size\n self.num_channels = num_channels\n self.num_encoder_blocks = num_encoder_blocks\n self.sr_ratios = sr_ratios\n self.depths = depths\n self.hidden_sizes = hidden_sizes\n self.downsampling_rates = downsampling_rates\n self.is_training = is_training\n self.use_labels = use_labels\n self.hidden_act = hidden_act\n self.hidden_dropout_prob = hidden_dropout_prob\n self.initializer_range = initializer_range\n self.num_labels = num_labels\n self.scope = scope\n\n def prepare_config_and_inputs(self):\n pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])\n\n labels = None\n if self.use_labels:\n labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)\n\n config = PoolFormerConfig(\n image_size=self.image_size,\n num_channels=self.num_channels,\n num_encoder_blocks=self.num_encoder_blocks,\n depths=self.depths,\n hidden_sizes=self.hidden_sizes,\n hidden_act=self.hidden_act,\n hidden_dropout_prob=self.hidden_dropout_prob,\n initializer_range=self.initializer_range,\n )\n\n return config, pixel_values, labels\n\n def create_and_check_model(self, config, pixel_values, labels):\n model = PoolFormerModel(config=config)\n model.to(torch_device)\n model.eval()\n result = model(pixel_values)\n expected_height = expected_width = self.image_size // 32.0\n self.parent.assertEqual(\n result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width)\n )\n\n def prepare_config_and_inputs_for_common(self):\n config_and_inputs = self.prepare_config_and_inputs()\n config, pixel_values, labels = config_and_inputs\n inputs_dict = {\"pixel_values\": pixel_values}\n return config, inputs_dict\n\n\n@require_torch\nclass PoolFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):\n all_model_classes = (PoolFormerModel, PoolFormerForImageClassification) if is_torch_available() else ()\n pipeline_model_mapping = 
(\n {\"feature-extraction\": PoolFormerModel, \"image-classification\": PoolFormerForImageClassification}\n if is_torch_available()\n else {}\n )\n\n test_head_masking = False\n test_pruning = False\n test_resize_embeddings = False\n test_torchscript = False\n has_attentions = False\n\n def setUp(self):\n self.model_tester = PoolFormerModelTester(self)\n self.config_tester = PoolFormerConfigTester(self, config_class=PoolFormerConfig)\n\n def test_config(self):\n self.config_tester.run_common_tests()\n\n def test_model(self):\n config_and_inputs = self.model_tester.prepare_config_and_inputs()\n self.model_tester.create_and_check_model(*config_and_inputs)\n\n @unittest.skip(\"PoolFormer does not use inputs_embeds\")\n def test_inputs_embeds(self):\n pass\n\n @unittest.skip(\"PoolFormer does not have get_input_embeddings method and get_output_embeddings methods\")\n def test_model_common_attributes(self):\n pass\n\n def test_hidden_states_output(self):\n def check_hidden_states_output(inputs_dict, config, model_class):\n model = model_class(config)\n model.to(torch_device)\n model.eval()\n\n with torch.no_grad():\n outputs = model(**self._prepare_for_class(inputs_dict, model_class))\n\n hidden_states = outputs.hidden_states\n\n expected_num_layers = self.model_tester.num_encoder_blocks\n self.assertEqual(len(hidden_states), expected_num_layers)\n\n # verify the first hidden states (first block)\n self.assertListEqual(\n list(hidden_states[0].shape[-3:]),\n [\n self.model_tester.hidden_sizes[0],\n self.model_tester.image_size // 4,\n self.model_tester.image_size // 4,\n ],\n )\n\n config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()\n\n for model_class in self.all_model_classes:\n inputs_dict[\"output_hidden_states\"] = True\n check_hidden_states_output(inputs_dict, config, model_class)\n\n # check that output_hidden_states also work using config\n del inputs_dict[\"output_hidden_states\"]\n config.output_hidden_states = True\n\n check_hidden_states_output(inputs_dict, config, model_class)\n\n def test_training(self):\n if not self.model_tester.is_training:\n return\n\n config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()\n config.return_dict = True\n\n for model_class in self.all_model_classes:\n if model_class in get_values(MODEL_MAPPING):\n continue\n model = model_class(config)\n model.to(torch_device)\n model.train()\n inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)\n loss = model(**inputs).loss\n loss.backward()\n\n def test_forward_signature(self):\n config, _ = self.model_tester.prepare_config_and_inputs_for_common()\n\n for model_class in self.all_model_classes:\n model = model_class(config)\n signature = inspect.signature(model.forward)\n # signature.parameters is an OrderedDict => so arg_names order is deterministic\n arg_names = [*signature.parameters.keys()]\n\n expected_arg_names = [\"pixel_values\"]\n self.assertListEqual(arg_names[:1], expected_arg_names)\n\n @slow\n def test_model_from_pretrained(self):\n for model_name in POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:\n model = PoolFormerModel.from_pretrained(model_name)\n self.assertIsNotNone(model)\n\n\n# We will verify our results on an image of cute cats\ndef prepare_img():\n image = Image.open(\"./tests/fixtures/tests_samples/COCO/000000039769.png\")\n return image\n\n\n@require_torch\nclass PoolFormerModelIntegrationTest(unittest.TestCase):\n @slow\n def test_inference_image_classification_head(self):\n image_processor = 
PoolFormerImageProcessor()\n model = PoolFormerForImageClassification.from_pretrained(\"sail/poolformer_s12\").to(torch_device)\n\n inputs = image_processor(images=prepare_img(), return_tensors=\"pt\").to(torch_device)\n\n # forward pass\n with torch.no_grad():\n outputs = model(**inputs)\n\n # verify the logits\n expected_shape = torch.Size((1, 1000))\n self.assertEqual(outputs.logits.shape, expected_shape)\n\n expected_slice = torch.tensor([-0.6113, 0.1685, -0.0492]).to(torch_device)\n self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))\n", "repo_name": "huggingface/transformers", "sub_path": "tests/models/poolformer/test_modeling_poolformer.py", "file_name": "test_modeling_poolformer.py", "file_ext": "py", "file_size_in_byte": 8655, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 115573, "dataset": "github-code", "pt": "85", "api": [{"api_name": "transformers.is_torch_available", "line_number": 13, "usage_type": "call"}, {"api_name": "transformers.is_vision_available", "line_number": 20, "usage_type": "call"}, {"api_name": "test_configuration_common.ConfigTester", "line_number": 26, "usage_type": "name"}, {"api_name": "test_modeling_common.floats_tensor", "line_number": 71, "usage_type": "call"}, {"api_name": "test_modeling_common.ids_tensor", "line_number": 75, "usage_type": "call"}, {"api_name": "transformers.PoolFormerConfig", "line_number": 77, "usage_type": "call"}, {"api_name": "transformers.PoolFormerModel", "line_number": 91, "usage_type": "call"}, {"api_name": "transformers.testing_utils.torch_device", "line_number": 92, "usage_type": "argument"}, {"api_name": "test_modeling_common.ModelTesterMixin", "line_number": 108, "usage_type": "name"}, {"api_name": "test_pipeline_mixin.PipelineTesterMixin", "line_number": 108, "usage_type": "name"}, {"api_name": "unittest.TestCase", "line_number": 108, "usage_type": "attribute"}, {"api_name": "transformers.is_torch_available", "line_number": 109, "usage_type": "call"}, {"api_name": "transformers.PoolFormerModel", "line_number": 109, "usage_type": "name"}, {"api_name": "transformers.PoolFormerForImageClassification", "line_number": 109, "usage_type": "name"}, {"api_name": "transformers.is_torch_available", "line_number": 112, "usage_type": "call"}, {"api_name": "transformers.PoolFormerModel", "line_number": 111, "usage_type": "name"}, {"api_name": "transformers.PoolFormerForImageClassification", "line_number": 111, "usage_type": "name"}, {"api_name": "transformers.PoolFormerConfig", "line_number": 124, "usage_type": "name"}, {"api_name": "unittest.skip", "line_number": 133, "usage_type": "call"}, {"api_name": "unittest.skip", "line_number": 137, "usage_type": "call"}, {"api_name": "transformers.testing_utils.torch_device", "line_number": 144, "usage_type": "argument"}, {"api_name": "torch.no_grad", "line_number": 147, "usage_type": "call"}, {"api_name": "transformers.models.auto.get_values", "line_number": 185, "usage_type": "call"}, {"api_name": "transformers.MODEL_MAPPING", "line_number": 185, "usage_type": "argument"}, {"api_name": "transformers.testing_utils.torch_device", "line_number": 188, "usage_type": "argument"}, {"api_name": "inspect.signature", "line_number": 199, "usage_type": "call"}, {"api_name": "transformers.models.poolformer.modeling_poolformer.POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "line_number": 208, "usage_type": "name"}, {"api_name": "transformers.PoolFormerModel.from_pretrained", "line_number": 209, "usage_type": "call"}, {"api_name": 
"transformers.PoolFormerModel", "line_number": 209, "usage_type": "name"}, {"api_name": "transformers.testing_utils.slow", "line_number": 206, "usage_type": "name"}, {"api_name": "transformers.testing_utils.require_torch", "line_number": 107, "usage_type": "name"}, {"api_name": "PIL.Image.open", "line_number": 215, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 215, "usage_type": "name"}, {"api_name": "unittest.TestCase", "line_number": 220, "usage_type": "attribute"}, {"api_name": "transformers.PoolFormerImageProcessor", "line_number": 223, "usage_type": "call"}, {"api_name": "transformers.testing_utils.torch_device", "line_number": 224, "usage_type": "argument"}, {"api_name": "transformers.PoolFormerForImageClassification.from_pretrained", "line_number": 224, "usage_type": "call"}, {"api_name": "transformers.PoolFormerForImageClassification", "line_number": 224, "usage_type": "name"}, {"api_name": "transformers.testing_utils.torch_device", "line_number": 226, "usage_type": "argument"}, {"api_name": "torch.no_grad", "line_number": 229, "usage_type": "call"}, {"api_name": "torch.Size", "line_number": 233, "usage_type": "call"}, {"api_name": "transformers.testing_utils.torch_device", "line_number": 236, "usage_type": "argument"}, {"api_name": "torch.tensor", "line_number": 236, "usage_type": "call"}, {"api_name": "torch.allclose", "line_number": 237, "usage_type": "call"}, {"api_name": "transformers.testing_utils.slow", "line_number": 221, "usage_type": "name"}, {"api_name": "transformers.testing_utils.require_torch", "line_number": 219, "usage_type": "name"}]}
+{"seq_id": "8819683623", "text": "from __future__ import division\nfrom __future__ import print_function\nfrom __future__ import absolute_import\n\nimport numpy as np\nimport logging\n\nfrom PySide import QtGui\nfrom PySide import QtCore\n\nimport matplotlib\nmatplotlib.use(\"Qt4Agg\")\nmatplotlib.rcParams['backend.qt4'] = 'PySide'\n\nimport mpl_qt.ui.main as main\nimport mpl_qt.ui.plot as plot\n\n\nLOGGER = logging.getLogger(__name__)\n\n\nclass QuiverModel(object):\n\n def __init__(self, pxy, vxy):\n self.xy = pxy\n self.xyvalue = vxy\n\n\nclass TableModel(QtCore.QAbstractTableModel):\n\n def __init__(self, parent, pxy, vxy, *args):\n if pxy.shape != vxy.shape:\n raise ValueError(\"pxy and vxy have to be of same shape\")\n super(TableModel, self).__init__(parent, *args)\n self.model = np.concatenate([pxy, vxy], axis=1)\n self.header = [\"x\", \"y\", \"xvalue\", \"yvalue\"]\n\n def rowCount(self, parent):\n return len(self.model)\n\n def columnCount(self, parent):\n return len(self.model[0])\n\n def data(self, index, role):\n if not index.isValid():\n return None\n elif role != QtCore.Qt.DisplayRole:\n return None\n return str(self.model[index.row(), index.column()])\n\n def headerData(self, col, orientation, role):\n if orientation == QtCore.Qt.Horizontal and role == QtCore.Qt.DisplayRole:\n return self.header[col]\n return None\n\n# def sort(self, col, order):\n # \"\"\"sort table by given column number col\"\"\"\n # self.emit(SIGNAL(\"layoutAboutToBeChanged()\"))\n # self.mylist = sorted(self.mylist,\n # key=operator.itemgetter(col))\n # if order == Qt.DescendingOrder:\n # self.mylist.reverse()\n # self.emit(SIGNAL(\"layoutChanged()\"))\n\n\nclass MainWindow(QtGui.QMainWindow, main.Ui_MainWindow):\n\n def __init__(self, parent=None):\n super(MainWindow, self).__init__(parent)\n self.setupUi(self)\n\n # define model\n gx, gy = np.meshgrid(np.linspace(-5, 5, 5), np.linspace(-5, 5, 5))\n pxy = np.array([gx.flatten(), gy.flatten()]).T\n phi = 0.5 * np.pi\n m = np.array([[np.cos(phi), np.sin(phi)],\n [-np.sin(phi), np.cos(phi)]])\n pxy2 = np.dot(pxy, m)\n #pxy2 = pxy + np.array([1, 0])\n vxy = pxy - pxy2\n\n # set up views and signals and slots\n LOGGER.debug(\"set up\")\n self.model = QuiverModel(pxy, vxy)\n self.quiver_plot = plot.QuiverPlotWidget(parent=self, model=self.model)\n self.mesh_plot = plot.MeshplotWidget(parent=self, model=self.model)\n\n self.tabWidget.addTab(self.quiver_plot, \"quiver\")\n self.tabWidget.addTab(self.mesh_plot, \"mesh\")\n\n self.table_view = QtGui.QTableView()\n self.tmodel = TableModel(self, pxy, vxy)\n self.table_view.setModel(self.tmodel)\n self.tabWidget.addTab(self.table_view, \"data\")\n\n self.scaleEdit.setText(str(self.quiver_plot.scale))\n self.scaleEdit.editingFinished.connect(self.on_edit_scale)\n self.keylengthEdit.setText(str(self.quiver_plot.key_length))\n self.keylengthEdit.editingFinished.connect(self.on_edit_key_length)\n\n def on_edit_key_length(self):\n LOGGER.debug(\"on_edit_key_length\")\n self.quiver_plot.key_length = float(self.keylengthEdit.text())\n\n def on_edit_scale(self):\n LOGGER.debug(\"on_edit_scale\")\n scale = float(self.scaleEdit.text())\n self.quiver_plot.scale = scale\n self.mesh_plot.scale = scale\n", "repo_name": "micviklui/mpl_qt", "sub_path": "mpl_qt/ui/ui.py", "file_name": "ui.py", "file_ext": "py", "file_size_in_byte": 3555, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "85", "api": [{"api_name": "matplotlib.use", "line_number": 12, "usage_type": "call"}, 
{"api_name": "matplotlib.rcParams", "line_number": 13, "usage_type": "attribute"}, {"api_name": "logging.getLogger", "line_number": 19, "usage_type": "call"}, {"api_name": "PySide.QtCore.QAbstractTableModel", "line_number": 29, "usage_type": "attribute"}, {"api_name": "PySide.QtCore", "line_number": 29, "usage_type": "name"}, {"api_name": "numpy.concatenate", "line_number": 35, "usage_type": "call"}, {"api_name": "PySide.QtCore.Qt", "line_number": 47, "usage_type": "attribute"}, {"api_name": "PySide.QtCore", "line_number": 47, "usage_type": "name"}, {"api_name": "PySide.QtCore.Qt", "line_number": 52, "usage_type": "attribute"}, {"api_name": "PySide.QtCore", "line_number": 52, "usage_type": "name"}, {"api_name": "PySide.QtGui.QMainWindow", "line_number": 66, "usage_type": "attribute"}, {"api_name": "PySide.QtGui", "line_number": 66, "usage_type": "name"}, {"api_name": "mpl_qt.ui.main.Ui_MainWindow", "line_number": 66, "usage_type": "attribute"}, {"api_name": "mpl_qt.ui.main", "line_number": 66, "usage_type": "name"}, {"api_name": "numpy.meshgrid", "line_number": 73, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 73, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 74, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 75, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 76, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 76, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 76, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 77, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 77, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 78, "usage_type": "call"}, {"api_name": "mpl_qt.ui.plot.QuiverPlotWidget", "line_number": 85, "usage_type": "call"}, {"api_name": "mpl_qt.ui.plot", "line_number": 85, "usage_type": "name"}, {"api_name": "mpl_qt.ui.plot.MeshplotWidget", "line_number": 86, "usage_type": "call"}, {"api_name": "mpl_qt.ui.plot", "line_number": 86, "usage_type": "name"}, {"api_name": "PySide.QtGui.QTableView", "line_number": 91, "usage_type": "call"}, {"api_name": "PySide.QtGui", "line_number": 91, "usage_type": "name"}]}
+{"seq_id": "24584479895", "text": "from typing import Union, Tuple, Any, Callable, Iterator, Set, Optional, overload, TypeVar, Mapping, Dict\nfrom collections import OrderedDict\nfrom torch_geometric.nn.conv.gen_conv import GENConv\nfrom torch_geometric.nn.conv.graph_conv import GraphConv\nfrom tqdm import tqdm\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch import Tensor, device, dtype\n\nfrom torch_geometric.nn.conv import GCNConv, SAGEConv, GCN2Conv, SGConv\nfrom .layers import QBatchNorm1d, QLinear, QReLU, QGCNConv, QDropout, QDropout2, QSAGEConv, QGENConv, QGraphConv, QGCN2Conv\nfrom .conf import config\nfrom .gatconv import CustomGATConv, QCustomGATConv\n\n\nclass QModule(nn.Module):\n def __init__(self, model):\n super().__init__()\n self.model = model\n QModule.convert_layers(model)\n\n @staticmethod\n def convert_layers(module):\n for name, child in module.named_children():\n # Do not convert layers that are already quantized\n if isinstance(child, (QBatchNorm1d, QLinear, QReLU, QGCNConv, QGCN2Conv, QDropout, QSAGEConv, QGENConv, QCustomGATConv)):\n continue\n if isinstance(child, nn.BatchNorm1d) and config.enable_quantized_bn:\n setattr(module, name, QBatchNorm1d(child.num_features, child.eps, child.momentum,\n child.affine, child.track_running_stats))\n elif isinstance(child, nn.Linear):\n setattr(module, name, QLinear(child.in_features, child.out_features,\n child.bias is not None))\n elif isinstance(child, nn.ReLU):\n setattr(module, name, QReLU())\n elif isinstance(child, nn.Dropout):\n if config.dropout2:\n setattr(module, name, QDropout2(child.p))\n else:\n setattr(module, name, QDropout(child.p))\n elif isinstance(child, GCNConv):\n setattr(module, name, QGCNConv(child.in_channels, child.out_channels, child.improved, child.cached,\n child.add_self_loops, child.normalize, child.bias is not None,\n aggr=child.aggr))\n elif isinstance(child, GCN2Conv):\n beta = child.beta\n shared_weights = child.weight2 is None\n setattr(module, name, QGCN2Conv(child.channels, alpha=child.alpha, theta=None, layer=None, shared_weights=shared_weights,\n cached=child.cached, add_self_loops=child.add_self_loops, normalize=child.normalize))\n curconv = getattr(module, name)\n curconv.beta = child.beta\n elif isinstance(child, SAGEConv):\n setattr(module, name, QSAGEConv(child.in_channels, child.out_channels, child.normalize, child.root_weight,\n child.lin_l.bias is not None))\n elif isinstance(child, GraphConv):\n setattr(module, name, QGraphConv(child.in_channels, child.out_channels, child.aggr, child.lin_l.bias is not None))\n elif isinstance(child, CustomGATConv):\n setattr(module, name, QCustomGATConv(child.in_channels, child.out_channels, child.heads, child.concat, \n child.negative_slope, child.dropout, child.add_self_loops, child.bias is not None, \n child.residual, child.use_attn_dst))\n elif isinstance(child, GENConv):\n msg_norm = child.msg_norm is not None\n learn_msg_scale = True if (msg_norm and child.msg_norm.scale.requires_grad) else False\n learn_p = isinstance(child.p, torch.nn.Parameter)\n is_softmax = child.aggr == 'softmax'\n if is_softmax and isinstance(child.t, torch.nn.Parameter):\n learn_t = True\n else:\n learn_t = False\n num_layers = 0\n norm = 'batch'\n for m in child.mlp:\n if isinstance(m, torch.nn.Linear):\n num_layers += 1\n if isinstance(m, (nn.BatchNorm1d, nn.LayerNorm, nn.InstanceNorm1d)):\n if isinstance(m, nn.BatchNorm1d):\n pass\n elif isinstance(m, nn.LayerNorm):\n norm = 'layer'\n elif isinstance(m, nn.InstanceNorm1d):\n 
norm = 'instance'\n\n                setattr(module, name, QGENConv(child.in_channels, child.out_channels, child.aggr, child.initial_t, learn_t, child.initial_p, learn_p, \n                                               msg_norm, learn_msg_scale, norm, num_layers, child.eps))\n            else:\n                QModule.convert_layers(child)\n\n    def forward(self, *args, **kwargs):\n        return self.model(*args, **kwargs)\n\n    def load_state_dict(self, state_dict: Mapping[str, Tensor],\n                        strict: bool = True):\n        # add back the \"model.\" prefix that this wrapper strips in state_dict()\n        new_state_dict = OrderedDict([(\"model.\" + k, v) for k, v in state_dict.items()])\n        return super().load_state_dict(new_state_dict, strict)\n\n    def state_dict(self, destination=None, prefix='', keep_vars=False):\n        ret = super().state_dict(destination, prefix, keep_vars)\n\n        # remove the prefix \"model.\" added by this wrapper\n        ret = OrderedDict([(k[6:], v) for k, v in ret.items()])\n        return ret\n\n    def reset_parameters(self):\n        self.model.reset_parameters()\n\n    @torch.no_grad()\n    def mini_inference(self, x_all, loader):\n        return self.model.mini_inference(x_all, loader)", "repo_name": "warai-0toko/Exact", "sub_path": "exact/exact/module.py", "file_name": "module.py", "file_ext": "py", "file_size_in_byte": 5823, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 9, "dataset": "github-code", "pt": "85", "api": [{"api_name": "torch.nn.Module", "line_number": 18, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 18, "usage_type": "name"}, {"api_name": "layers.QBatchNorm1d", "line_number": 28, "usage_type": "name"}, {"api_name": "layers.QLinear", "line_number": 28, "usage_type": "name"}, {"api_name": "layers.QReLU", "line_number": 28, "usage_type": "name"}, {"api_name": "layers.QGCNConv", "line_number": 28, "usage_type": "name"}, {"api_name": "layers.QGCN2Conv", "line_number": 28, "usage_type": "name"}, {"api_name": "layers.QDropout", "line_number": 28, "usage_type": "name"}, {"api_name": "layers.QSAGEConv", "line_number": 28, "usage_type": "name"}, {"api_name": "layers.QGENConv", "line_number": 28, "usage_type": "name"}, {"api_name": "gatconv.QCustomGATConv", "line_number": 28, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm1d", "line_number": 30, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 30, "usage_type": "name"}, {"api_name": "conf.config.enable_quantized_bn", "line_number": 30, "usage_type": "attribute"}, {"api_name": "conf.config", "line_number": 30, "usage_type": "name"}, {"api_name": "layers.QBatchNorm1d", "line_number": 31, "usage_type": "call"}, {"api_name": "torch.nn.Linear", "line_number": 33, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 33, "usage_type": "name"}, {"api_name": "layers.QLinear", "line_number": 34, "usage_type": "call"}, {"api_name": "torch.nn.ReLU", "line_number": 36, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 36, "usage_type": "name"}, {"api_name": "layers.QReLU", "line_number": 37, "usage_type": "call"}, {"api_name": "torch.nn.Dropout", "line_number": 38, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 38, "usage_type": "name"}, {"api_name": "conf.config.dropout2", "line_number": 39, "usage_type": "attribute"}, {"api_name": "conf.config", "line_number": 39, "usage_type": "name"}, {"api_name": "layers.QDropout2", "line_number": 40, "usage_type": "call"}, {"api_name": "layers.QDropout", "line_number": 42, "usage_type": "call"}, {"api_name": "torch_geometric.nn.conv.GCNConv", "line_number": 43, "usage_type": "argument"}, {"api_name": 
"layers.QGCNConv", "line_number": 44, "usage_type": "call"}, {"api_name": "torch_geometric.nn.conv.GCN2Conv", "line_number": 47, "usage_type": "argument"}, {"api_name": "layers.QGCN2Conv", "line_number": 50, "usage_type": "call"}, {"api_name": "torch_geometric.nn.conv.SAGEConv", "line_number": 54, "usage_type": "argument"}, {"api_name": "layers.QSAGEConv", "line_number": 55, "usage_type": "call"}, {"api_name": "torch_geometric.nn.conv.graph_conv.GraphConv", "line_number": 57, "usage_type": "argument"}, {"api_name": "layers.QGraphConv", "line_number": 58, "usage_type": "call"}, {"api_name": "gatconv.CustomGATConv", "line_number": 59, "usage_type": "argument"}, {"api_name": "gatconv.QCustomGATConv", "line_number": 60, "usage_type": "call"}, {"api_name": "torch_geometric.nn.conv.gen_conv.GENConv", "line_number": 63, "usage_type": "argument"}, {"api_name": "torch.nn", "line_number": 66, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 68, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 75, "usage_type": "attribute"}, {"api_name": "torch.nn.BatchNorm1d", "line_number": 77, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 77, "usage_type": "name"}, {"api_name": "torch.nn.LayerNorm", "line_number": 77, "usage_type": "attribute"}, {"api_name": "torch.nn.InstanceNorm1d", "line_number": 77, "usage_type": "attribute"}, {"api_name": "torch.nn.BatchNorm1d", "line_number": 78, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 78, "usage_type": "name"}, {"api_name": "torch.nn.LayerNorm", "line_number": 80, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 80, "usage_type": "name"}, {"api_name": "torch.nn.InstanceNorm1d", "line_number": 82, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 82, "usage_type": "name"}, {"api_name": "layers.QGENConv", "line_number": 85, "usage_type": "call"}, {"api_name": "typing.Union", "line_number": 93, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 93, "usage_type": "name"}, {"api_name": "torch.Tensor", "line_number": 93, "usage_type": "name"}, {"api_name": "collections.OrderedDict", "line_number": 96, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 103, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 110, "usage_type": "call"}]}
+{"seq_id": "18087371147", "text": "from lib.color_printer import ColorPrinter\nclass SqsDefaults:\n def __init__(self, config, config_data):\n self.config = config\n try:\n self.deadletter = config_data['deadletter']\n self.env = config_data['env']\n self.max_receive_count = config_data['max_receive_count']\n self.message_retention_period = config_data['message_retention_period']\n self.region = config_data['region']\n self.visibility_timeout = config_data['visibility_timeout']\n except KeyError as e:\n print(f\"Required key {e} not found in sqs_config.\")\n if config.verbose:\n print(\"Success loading SqsDefaults.\")\n\n\n def dump(self):\n cp = ColorPrinter(self.config.verbose)\n print(\"SqsDefaults are:\")\n cp.puts(f\" deadletter: {self.deadletter}\")\n cp.puts(f\" env: {self.env}\")\n cp.puts(f\" region: {self.region}\")\n cp.puts(f\" max_receive_count: {self.max_receive_count}\")\n cp.puts(f\" message_retention_period: {self.message_retention_period}\")\n cp.puts(f\" visibility_timeout: {self.visibility_timeout}\")\n\n", "repo_name": "mauricionr/jararaca", "sub_path": "lib/sqs_defaults.py", "file_name": "sqs_defaults.py", "file_ext": "py", "file_size_in_byte": 1110, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "85", "api": [{"api_name": "lib.color_printer.ColorPrinter", "line_number": 19, "usage_type": "call"}]}
+{"seq_id": "9079559145", "text": "from sklearn.datasets import load_digits\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.metrics import roc_auc_score\r\nfrom IPython.display import clear_output\r\nimport matplotlib.pyplot as plt\r\nimport tensorflow as tf\r\nimport numpy as np\r\n\r\nmnist = load_digits(2)\r\n\r\nX,y = mnist.data, mnist.target\r\n\r\nprint(\"y [shape - %s]:\" % (str(y.shape)), y[:10])\r\nprint(\"X [shape - %s]:\" % (str(X.shape)))\r\n\r\nprint('X:\\n',X[:3,:10])\r\nprint('y:\\n',y[:10])\r\nplt.imshow(X[0].reshape([8,8]))\r\nplt.show()\r\n\r\nweights = tf.Variable(initial_value=np.ones(5))\r\ninput_X = tf.placeholder(\"float32\", shape=(None, None,))\r\ninput_y = tf.placeholder(\"float32\", shape=(None, ))\r\n\r\npredicted_y = tf.placeholder(\"float32\", shape=(None, ))\r\nloss = tf.reduce_mean(predicted_y - input_y)\r\noptimizer = tf.train.MomentumOptimizer(0.01,0.9).minimize(loss, var_list=predicted_y)\r\n\r\ntrain_function = tf.losses.log_loss(X, y, weights)\r\npredict_function = tf.nn.top_k(X)\r\n\r\nX_train, X_test, y_train, y_test = train_test_split(X, y)\r\n\r\nfor i in range(5):\r\n loss_i = loss(i)\r\n\r\n print(\"loss at iter %i:%.4f\" % (i, loss_i))\r\n\r\n print(\"train auc:\", roc_auc_score(y_train, predict_function(X_train)))\r\n print(\"test auc:\", roc_auc_score(y_test, predict_function(X_test)))\r\n\r\nprint(\"resulting weights:\")\r\nplt.imshow(tf.contrib.keras.backend.get_value(weights).reshape(8, -1))\r\nplt.colorbar()\r\nplt.show()", "repo_name": "Gvein/DataMiningInAction", "sub_path": "Logistic regression.py", "file_name": "Logistic regression.py", "file_ext": "py", "file_size_in_byte": 1388, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "85", "api": [{"api_name": "sklearn.datasets.load_digits", "line_number": 9, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 18, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 18, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 19, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 19, "usage_type": "name"}, {"api_name": "tensorflow.Variable", "line_number": 21, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 21, "usage_type": "call"}, {"api_name": "tensorflow.placeholder", "line_number": 22, "usage_type": "call"}, {"api_name": "tensorflow.placeholder", "line_number": 23, "usage_type": "call"}, {"api_name": "tensorflow.placeholder", "line_number": 25, "usage_type": "call"}, {"api_name": "tensorflow.reduce_mean", "line_number": 26, "usage_type": "call"}, {"api_name": "tensorflow.train.MomentumOptimizer", "line_number": 27, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 27, "usage_type": "attribute"}, {"api_name": "tensorflow.losses.log_loss", "line_number": 29, "usage_type": "call"}, {"api_name": "tensorflow.losses", "line_number": 29, "usage_type": "attribute"}, {"api_name": "tensorflow.nn.top_k", "line_number": 30, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 30, "usage_type": "attribute"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 32, "usage_type": "call"}, {"api_name": "sklearn.metrics.roc_auc_score", "line_number": 39, "usage_type": "call"}, {"api_name": "sklearn.metrics.roc_auc_score", "line_number": 40, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 43, "usage_type": "call"}, {"api_name": 
"matplotlib.pyplot", "line_number": 43, "usage_type": "name"}, {"api_name": "tensorflow.contrib.keras.backend.get_value", "line_number": 43, "usage_type": "call"}, {"api_name": "tensorflow.contrib", "line_number": 43, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.colorbar", "line_number": 44, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 44, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 45, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 45, "usage_type": "name"}]}
+{"seq_id": "16769992145", "text": "import urllib\nfrom paypal_settings import settings\nfrom datetime import datetime\nfrom models import paypal_ec_token\nfrom paypal_tools import api_tools\nfrom exceptions import invalid_response_exception\n\nclass direct_payment(object, api_tools):\n \"\"\"\n Communicate with Paypal, a particular merchant services provider.\n \"\"\"\n\n def __init__(self, data, settings = settings()):\n \"\"\"\n Constructor\n \n @param data dictionary of transaction attribute values indexed by name\n @type data dict\n \"\"\"\n\n self.data = data\n\n # Paypal requires a 4-digit year, which is unusual\n current_year = str(datetime.utcnow().year)\n ed = str(self.data['exp_date'])\n new_year = current_year[:2] + ed[2:]\n # That's right, I'm assuming that the expiration is within 100 years.\n # Youwannafightaboutit????\n if int(new_year) < int(current_year):\n new_year = str(int(current_year[:2]) + 1) + ed\n self.data['exp_date'] = ed[:2] + new_year\n\n # Enforce length limits as defined by Paypal\n self.data['invoice_number'] = self.data['invoice_number'][:127]\n self.data['address_label'] = self.data['address_label'][:100]\n self.data['first_name'] = self.data['first_name'][:25]\n self.data['last_name'] = self.data['last_name'][:25]\n self.data['country'] = self.data['country'][:2]\n self.data['state'] = self.data['state'][:40]\n self.data['city'] = self.data['city'][:40]\n self.data['zip'] = self.data['zip'][:20]\n\n self.settings = settings\n \n def translate(self, data):\n \"\"\"\n Convert attribute names from our own schema to Paypal's.\n \n @param data dictionary of attribute names and values that will be passed to the gateway\n @type data dict\n @return dictionary of attribute names and values with Paypal-compatible names.\n @rtype dict\n \"\"\"\n\n #: Paypal attribute names indexed by corresponding PR names.\n mapping = {\n 'transaction_id' : 'TRANSACTIONID',\n 'card_type' : 'CREDITCARDTYPE',\n 'invoice_number' : 'INVNUM',\n 'first_name' : 'FIRSTNAME',\n 'address_label' : 'STREET',\n 'country' : 'COUNTRYCODE',\n 'last_name' : 'LASTNAME',\n 'card_number' : 'ACCT',\n 'sales_tax' : 'TAXAMT',\n 'exp_date' : 'EXPDATE',\n 'ip' : 'IPADDRESS',\n 'state' : 'STATE',\n 'amount' : 'AMT',\n 'city' : 'CITY',\n 'cvv2' : 'CVV2',\n 'zip' : 'ZIP',\n }\n \n ret = {}\n for atr in data:\n d = data[atr]\n if d is not None:\n ret[mapping[atr]] = d\n return ret\n \n \n def charge(self):\n \"\"\"\n stub that calls the right method\n \"\"\"\n\n return self.DoDirectPayment()\n\n def credit(self):\n \"\"\"\n stub that calls the right method\n \"\"\"\n\n return self.RefundTransaction()\n\n def DoDirectPayment(self):\n \"\"\"\n Execute a transaction.\n \"\"\"\n\n # Translate attribute names to what Paypal wants to see.\n self.data = self.translate(self.data)\n # Add Paypal-specific transaction attributes.\n self.data.update({\n 'PAYMENTACTION' : 'Sale',\n 'METHOD' : 'DoDirectPayment',\n })\n\n url_data = urllib.urlencode(self.add_common_parameters(self.data))\n return self.gen_response(self.get_url(url_data))\n\n def RefundTransaction(self):\n \"\"\" returnd a transaction \"\"\"\n\n self.data = self.translate(self.data)\n url_data = urllib.urlencode(\n self.add_common_parameters({\n 'TRANSACTIONID' : self.data['TRANSACTIONID'],\n 'REFUNDTYPE' : 'Partial',\n 'METHOD' : 'RefundTransaction',\n 'AMT' : self.data['AMT'],\n }))\n\n return self.gen_response(self.get_url(url_data))\n\nclass express_checkout(object, api_tools):\n \"\"\"\n Use paypal's Express Checkout feature.\n \"\"\"\n\n def 
__init__(self, settings = settings()):\n self.settings = settings\n self.credentials = self.add_common_parameters({})\n\n def get_token_url(self, amount):\n \"\"\"\n Get the URL, including token, to use for accessing Paypal.\n \n @param amount Amount of sale\n \n @return URL\n \"\"\"\n\n data = self.credentials.copy()\n data.update({\n 'METHOD' : 'SetExpressCheckout',\n 'RETURNURL' : self.settings.return_url,\n 'CANCELURL' : self.settings.cancel_url,\n 'AMT' : amount,\n 'NOSHIPPING' : '1',\n })\n\n response = self.get_url(urllib.urlencode(data))\n if 'TOKEN' not in response:\n raise invalid_response_exception(str(response) + str(data))\n\n token = paypal_ec_token(token = response['TOKEN'], amount = amount,\n time=datetime.utcnow())\n token.save()\n\n # the 'useraction=commit' makes the paypal site appear to complete the transaction with\n # something like a \"Pay Now\" button, instead of sending the user back to us to review\n # the details before submitting a payment\n return token.token, '%s&useraction=commit&token=%s' % (settings.express_checkout_url,\n token.token)\n \n# vim:tabstop=4 shiftwidth=4 expandtab\n", "repo_name": "AmericanResearchInstitute/poweru-server", "sub_path": "ecommerce/paypal.py", "file_name": "paypal.py", "file_ext": "py", "file_size_in_byte": 5591, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "85", "api": [{"api_name": "paypal_tools.api_tools", "line_number": 8, "usage_type": "name"}, {"api_name": "paypal_settings.settings", "line_number": 13, "usage_type": "call"}, {"api_name": "datetime.datetime.utcnow", "line_number": 24, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 24, "usage_type": "name"}, {"api_name": "paypal_settings.settings", "line_number": 43, "usage_type": "name"}, {"api_name": "urllib.urlencode", "line_number": 110, "usage_type": "call"}, {"api_name": "urllib.urlencode", "line_number": 117, "usage_type": "call"}, {"api_name": "paypal_tools.api_tools", "line_number": 127, "usage_type": "name"}, {"api_name": "paypal_settings.settings", "line_number": 132, "usage_type": "call"}, {"api_name": "paypal_settings.settings", "line_number": 133, "usage_type": "name"}, {"api_name": "urllib.urlencode", "line_number": 154, "usage_type": "call"}, {"api_name": "exceptions.invalid_response_exception", "line_number": 156, "usage_type": "call"}, {"api_name": "models.paypal_ec_token", "line_number": 158, "usage_type": "call"}, {"api_name": "datetime.datetime.utcnow", "line_number": 159, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 159, "usage_type": "name"}, {"api_name": "paypal_settings.settings.express_checkout_url", "line_number": 165, "usage_type": "attribute"}, {"api_name": "paypal_settings.settings", "line_number": 165, "usage_type": "name"}]}
+{"seq_id": "74499576555", "text": "import argparse\nfrom utils import config\nfrom dataset.data_loader import get_test_loader\nimport cv2\nimport os\n\nimport torch\nimport torch.nn as nn\n\ndef main():\n parser = argparse.ArgumentParser()\n arg = parser.add_argument\n arg('--model', type=str, default='UNet', choices=['UNet'])\n arg('--output-path', type=str, default='./output/')\n arg('--config-file', type=str, default='./config/train_config.yaml')\n args = parser.parse_args()\n\n cfg = config.Config(args.config_file)\n csv_dir = cfg.csv_dir\n\n network = args.model\n model_weight_path = 'checkpoint/model_' + args.model + '.pt'\n\n data_loader = get_test_loader(cfg, args.image_path, csv_dir)\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n network = nn.DataParallel(network)\n network.to(device)\n weight = torch.load(model_weight_path)\n network.load_state_dict(weight['model'])\n\n with torch.no_grad():\n network.eval()\n for test_image, test_name in data_loader:\n test_image = test_image.to(device)\n outputs = network(test_image)\n test_pred = outputs.squeeze().data.cpu().numpy()\n test_mask = (test_pred > cfg.Threshold).astype('int') * 255\n cv2.imwrite(os.path.join(args.output_path, '%s.png' % test_name), test_mask)\n\nif __name__ == '__main__':\n main()", "repo_name": "Howrunz/model_train", "sub_path": "test.py", "file_name": "test.py", "file_ext": "py", "file_size_in_byte": 1350, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "73", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 11, "usage_type": "call"}, {"api_name": "utils.config.Config", "line_number": 18, "usage_type": "call"}, {"api_name": "utils.config", "line_number": 18, "usage_type": "name"}, {"api_name": "dataset.data_loader.get_test_loader", "line_number": 24, "usage_type": "call"}, {"api_name": "torch.device", "line_number": 25, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 25, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 25, "usage_type": "attribute"}, {"api_name": "torch.nn.DataParallel", "line_number": 26, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 26, "usage_type": "name"}, {"api_name": "torch.load", "line_number": 28, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 31, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 38, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 38, "usage_type": "call"}, {"api_name": "os.path", "line_number": 38, "usage_type": "attribute"}]}
+{"seq_id": "4381377373", "text": "import numpy as np\nfrom skimage.exposure import rescale_intensity, equalize_hist\nfrom skimage.transform import SimilarityTransform, warp, rotate, rescale, resize\n\n\ndef contrast_stretch_image(img, perc=99.8):\n # Apply it on each channel\n for channel in range(0, img.shape[2]):\n p_lower, p_upper = np.percentile(img[:, :, channel], (100 - perc, perc))\n img[:, :, channel] = rescale_intensity(img[:, :, channel], in_range=(p_lower, p_upper))\n return img\n\n\ndef histo_equalize_image(img):\n for channel in range(0, img.shape[2]):\n img[:, :, channel] = equalize_hist(img[:, :, channel])\n return img\n\n\ndef shifting(image, x_shift, y_shift):\n tform = SimilarityTransform(translation=(x_shift, y_shift))\n shifted_image = warp(image, tform, mode='constant', cval=0)\n return shifted_image\n\n\ndef flipping(image, flip_horizontal, flip_vertical):\n # Do horizontal and/or vertical flipping\n if flip_horizontal:\n image = np.flip(image, axis=1)\n if flip_vertical:\n image = np.flip(image, axis=0)\n return image\n\n\ndef rotation(image, rotation_degree):\n image = rotate(image, rotation_degree)\n return image\n\n\ndef zoom(image, x_dim, y_dim, zooming_factor):\n if len(image.shape) == 3:\n rescaled_image = rescale(image, zooming_factor, mode='reflect', anti_aliasing=True, multichannel=True)\n else:\n rescaled_image = rescale(image, zooming_factor, mode='reflect', anti_aliasing=True, multichannel=False)\n if zooming_factor > 1:\n left = round((rescaled_image.shape[0] - x_dim) / 2)\n right = left + x_dim\n upper = round((rescaled_image.shape[1] - y_dim) / 2)\n lower = upper + y_dim\n cropped_image = rescaled_image[upper:lower, left:right]\n else:\n left = round((x_dim - rescaled_image.shape[0]) / 2)\n right = left + rescaled_image.shape[0]\n upper = round((y_dim - rescaled_image.shape[1]) / 2)\n lower = upper + rescaled_image.shape[1]\n cropped_image = np.zeros(image.shape)\n if len(image.shape) == 2:\n cropped_image[upper:lower, left:right] = rescaled_image\n else:\n cropped_image[upper:lower, left:right, :] = rescaled_image\n return cropped_image\n\n\ndef zoom_resize(image, x_dim, y_dim, zooming_factor):\n if zooming_factor > 1:\n resized_image = resize(image, (round(zooming_factor * x_dim), round(zooming_factor * y_dim)),\n anti_aliasing=True, preserve_range=True)\n # print(round(zooming_factor * x_dim))\n # print(round(zooming_factor * y_dim))\n left = round((round(zooming_factor * x_dim) - x_dim) / 2)\n upper = round((round(zooming_factor * y_dim) - y_dim) / 2)\n right = left + x_dim\n lower = upper + y_dim\n cropped_image = resized_image[upper:lower, left:right]\n else:\n resized_image = resize(image, (round(zooming_factor * x_dim), round(zooming_factor * y_dim)),\n anti_aliasing=True, preserve_range=True)\n # print(round(zooming_factor * x_dim))\n # print(round(zooming_factor * y_dim))\n left = round((x_dim - round(zooming_factor * x_dim)) / 2)\n upper = round((y_dim - round(zooming_factor * y_dim)) / 2)\n right = left + round(zooming_factor * x_dim)\n lower = upper + round(zooming_factor * y_dim)\n # print(upper, lower, left, right)\n cropped_image = np.zeros(image.shape)\n if len(image.shape) == 2:\n cropped_image[upper:lower, left:right] = resized_image\n else:\n cropped_image[upper:lower, left:right, :] = resized_image\n return cropped_image\n\n\ndef signal_reduction(image, channel, signal_reduction_factor):\n if channel == 0:\n image[:, :, 0] = signal_reduction_factor * image[:, :, 0]\n if channel == 1:\n image[:, :, 1] = signal_reduction_factor * 
image[:, :, 1]\n if channel == 2:\n image[:, :, 2] = signal_reduction_factor * image[:, :, 2]\n return image", "repo_name": "imsb-uke/podometric_u_net", "sub_path": "network/dataset/image_transformations.py", "file_name": "image_transformations.py", "file_ext": "py", "file_size_in_byte": 3995, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "85", "api": [{"api_name": "numpy.percentile", "line_number": 9, "usage_type": "call"}, {"api_name": "skimage.exposure.rescale_intensity", "line_number": 10, "usage_type": "call"}, {"api_name": "skimage.exposure.equalize_hist", "line_number": 16, "usage_type": "call"}, {"api_name": "skimage.transform.SimilarityTransform", "line_number": 21, "usage_type": "call"}, {"api_name": "skimage.transform.warp", "line_number": 22, "usage_type": "call"}, {"api_name": "numpy.flip", "line_number": 29, "usage_type": "call"}, {"api_name": "numpy.flip", "line_number": 31, "usage_type": "call"}, {"api_name": "skimage.transform.rotate", "line_number": 36, "usage_type": "call"}, {"api_name": "skimage.transform.rescale", "line_number": 42, "usage_type": "call"}, {"api_name": "skimage.transform.rescale", "line_number": 44, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 56, "usage_type": "call"}, {"api_name": "skimage.transform.resize", "line_number": 66, "usage_type": "call"}, {"api_name": "skimage.transform.resize", "line_number": 76, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 85, "usage_type": "call"}]}
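The helpers in the record above are plain array-in/array-out functions, so they can be smoke-tested without the rest of the pipeline. A minimal sketch follows; the import path is taken from the repo layout shown in the record and may need adjusting, and the random image is a stand-in.

```python
import numpy as np
from network.dataset.image_transformations import flipping, rotation, zoom

img = np.random.rand(64, 64, 3)  # stand-in for an RGB crop

flipped = flipping(img, flip_horizontal=True, flip_vertical=False)
rotated = rotation(img, rotation_degree=15)
zoomed = zoom(img, x_dim=64, y_dim=64, zooming_factor=1.5)

# all three transforms preserve the spatial shape
assert flipped.shape == rotated.shape == zoomed.shape == img.shape
```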
+{"seq_id": "11539415898", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nThis program creates a quit\nbutton. When we press the button,\nthe application terminates.\n\"\"\"\nimport sys\nfrom PyQt5.QtWidgets import QWidget, QPushButton, QApplication\nfrom PyQt5.QtCore import QCoreApplication\n\nclass Example(QWidget):\n def __init__(self):\n super(Example, self).__init__()\n\n self.initUI()\n\n def initUI(self):\n qbtn = QPushButton('Quit', self)\n qbtn.clicked.connect(QCoreApplication.instance().quit)\n\n # 设置按钮默认大小\n qbtn.resize(qbtn.sizeHint())\n qbtn.move(50, 50)\n\n # 设置窗口的位置和大小\n self.setGeometry(300, 300, 250, 150)\n self.setWindowTitle('Quit button')\n self.show()\n\nif __name__ == \"__main__\":\n # 创建应用程序对象\n app = QApplication(sys.argv)\n # 创建用户界面对象\n ex = Example()\n sys.exit(app.exec_())\n\n\n", "repo_name": "yyHaker/PythonStudy", "sub_path": "modules/pyqt5_/basics/quit.py", "file_name": "quit.py", "file_ext": "py", "file_size_in_byte": 896, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 4, "dataset": "github-code", "pt": "73", "api": [{"api_name": "PyQt5.QtWidgets.QWidget", "line_number": 11, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QPushButton", "line_number": 18, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.QCoreApplication.instance", "line_number": 19, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.QCoreApplication", "line_number": 19, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QApplication", "line_number": 32, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 32, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 35, "usage_type": "call"}]}
+{"seq_id": "19425164350", "text": "\"\"\"add language to recommend apps\n\nRevision ID: a45f4dfde53b\nRevises: 9f4e3427ea84\nCreate Date: 2023-05-25 17:50:32.052335\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'a45f4dfde53b'\ndown_revision = '9f4e3427ea84'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n with op.batch_alter_table('recommended_apps', schema=None) as batch_op:\n batch_op.add_column(sa.Column('language', sa.String(length=255), server_default=sa.text(\"'en-US'::character varying\"), nullable=False))\n batch_op.drop_index('recommended_app_is_listed_idx')\n batch_op.create_index('recommended_app_is_listed_idx', ['is_listed', 'language'], unique=False)\n\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n with op.batch_alter_table('recommended_apps', schema=None) as batch_op:\n batch_op.drop_index('recommended_app_is_listed_idx')\n batch_op.create_index('recommended_app_is_listed_idx', ['is_listed'], unique=False)\n batch_op.drop_column('language')\n\n # ### end Alembic commands ###\n", "repo_name": "langgenius/dify", "sub_path": "api/migrations/versions/a45f4dfde53b_add_language_to_recommend_apps.py", "file_name": "a45f4dfde53b_add_language_to_recommend_apps.py", "file_ext": "py", "file_size_in_byte": 1209, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 10925, "dataset": "github-code", "pt": "73", "api": [{"api_name": "alembic.op.batch_alter_table", "line_number": 21, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 21, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 22, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 22, "usage_type": "call"}, {"api_name": "sqlalchemy.text", "line_number": 22, "usage_type": "call"}, {"api_name": "alembic.op.batch_alter_table", "line_number": 31, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 31, "usage_type": "name"}]}
+{"seq_id": "5216530549", "text": "\nimport json\nimport os\nfrom pathlib import Path\nimport sys\n\nsys.path.insert(0, '../')\nfrom Config import get_data_dir\n\nsys.path.insert(0, '../../Common/')\nfrom COCOWrapper import COCOWrapper, id_from_path\nfrom FormatData import mask_images_parallel\n\nif __name__ == '__main__':\n \n # Configuration\n label1 = sys.argv[1]\n label2 = sys.argv[2]\n spurious = sys.argv[3]\n \n tuple_dir = '{}/{}-{}/{}'.format(get_data_dir(), label1, label2, spurious)\n \n for mode in ['val', 'train']:\n mode_dir = '{}/{}'.format(tuple_dir, mode)\n\n # Setup COCO\n coco = COCOWrapper(mode = mode)\n coco.construct_id2img()\n \n # Load the Splits\n with open('{}/splits.json'.format(mode_dir), 'r') as f:\n splits = json.load(f)\n \n # Get which object is being removed\n chosen_class = spurious\n chosen_id = coco.get_class_id(chosen_class)\n \n class_type = 'spurious'\n unmask = False\n unmask_classes = None\n \n # Create the counterfactual images\n configs = [('1s', 1), ('0s', 0)]\n for config in configs:\n # Get the base images\n name = config[0]\n imgs = coco.get_imgs_by_ids(splits[name])\n\n # Get the label\n label = config[1]\n \n config_dir = '{}/{}-{}'.format(mode_dir, name, class_type)\n for mask_mode in ['box', 'pixel']:\n # Setup the output directory\n save_dir = '{}/{}'.format(config_dir, mask_mode)\n os.system('rm -rf {}'.format(save_dir))\n Path(save_dir).mkdir(parents = True)\n print(save_dir)\n\n # Maks the chosen object\n filenames, _ = mask_images_parallel(imgs, coco, \n save_dir,\n chosen_id = chosen_id, mode = mask_mode,\n unmask = unmask, unmask_classes = None,\n use_png = (mask_mode == 'pixel'))\n \n # Save the output\n images = {}\n for i in range(len(filenames)):\n filename = filenames[i]\n id = id_from_path(filename)\n images[id] = [filename, label]\n\n with open('{}/images.json'.format(save_dir), 'w') as f:\n json.dump(images, f)\n ", "repo_name": "GDPlumb/SPIRE", "sub_path": "COCO-nuanced/1-GenerateCounterfactuals/Remove.py", "file_name": "Remove.py", "file_ext": "py", "file_size_in_byte": 2537, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "73", "api": [{"api_name": "sys.path.insert", "line_number": 7, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 7, "usage_type": "attribute"}, {"api_name": "sys.path.insert", "line_number": 10, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 10, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 17, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 18, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 19, "usage_type": "attribute"}, {"api_name": "Config.get_data_dir", "line_number": 21, "usage_type": "call"}, {"api_name": "COCOWrapper.COCOWrapper", "line_number": 27, "usage_type": "call"}, {"api_name": "json.load", "line_number": 32, "usage_type": "call"}, {"api_name": "os.system", "line_number": 56, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 57, "usage_type": "call"}, {"api_name": "FormatData.mask_images_parallel", "line_number": 61, "usage_type": "call"}, {"api_name": "COCOWrapper.id_from_path", "line_number": 71, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 75, "usage_type": "call"}]}
+{"seq_id": "21748197702", "text": "#!/usr/bin/env python\n# -*- encoding: utf-8 -*-\n\"\"\"\n Sequence Labeling Model.\n\"\"\"\nimport torch\nimport torch.nn as nn\nimport sys\nsys.path.append('..')\nfrom model.config import conf as conf\nfrom model.modules.bilstm import Bilstm\nfrom model.modules.crf import CRF\nfrom model.modules.feature import CharFeature, WordFeature, PositionFeature\nimport numpy as np\nclass Bilstm_LR_Model(nn.Module):\n\n def __init__(self, word_embeddings, word_require_grad,\n char_embedding_shape, filter_sizes, filter_nums, \n target_size, average_batch=True, use_cuda=True):\n \"\"\"\n Args:\n feature_names: list(str), 特征名称, 不包括`label`和`char`\n\n feature_size_dict: dict({str: int}), 特征表大小字典\n feature_dim_dict: dict({str: int}), 输入特征dim字典\n pretrained_embed_dict: dict({str: np.array})\n require_grad_dict: bool, 是否更新feature embedding的权重\n\n # char parameters\n use_char: bool, 是否使用字符特征, default is False\n filter_sizes: list(int), 卷积核尺寸, default is [3]\n filter_nums: list(int), 卷积核数量, default is [32]\n\n # rnn parameters\n rnn_unit_type: str, options: ['rnn', 'lstm', 'gru']\n num_rnn_units: int, rnn单元数\n num_layers: int, 层数\n bi_flag: bool, 是否双向, default is True\n\n use_crf: bool, 是否使用crf层\n\n dropout_rate: float, dropout rate\n\n average_batch: bool, 是否对batch的loss做平均\n use_cuda: bool\n \"\"\"\n super(Bilstm_LR_Model, self).__init__()\n word_embedding_shape = (len(word_embeddings),len(word_embeddings[0]))\n # word level feature layer\n self.word_feature_layer = WordFeature(word_embedding_shape, \n word_embeddings, word_require_grad)\n\n self.char_feature_layer = CharFeature(char_embedding_shape, \n filter_sizes, filter_nums)\n trans_input_dim = word_embedding_shape[1] + sum(filter_nums)\n # feature dropout\n self.dropout_feature = nn.Dropout(conf.dropout_rate)\n input_size = trans_input_dim\n # trans layer\n self.bilstm_layer = Bilstm(input_size, conf.trans_output_size)\n\n # trans dropout\n self.dropout_trans = nn.Dropout(conf.dropout_rate)\n\n # crf layer\n self.crf_layer = CRF(target_size, average_batch, use_cuda)\n\n # dense layer\n hidden_input_dim = conf.trans_output_size * 3\n ex_target_size = target_size + 2\n self.hidden2tag = nn.Linear(hidden_input_dim, ex_target_size)\n\n # loss\n self.loss_function = self.crf_layer.neg_log_likelihood_loss\n self.local_loss_function = nn.CrossEntropyLoss(reduce=False)\n self.local_loss_function_nil = self.CrossEntropyLoss_nil\n self.loss_function_ratio = self.crf_layer.neg_log_likelihood_loss_ratio\n self.loss_function_nil = self.crf_layer.neg_log_likelihood_loss_nil\n self.average_batch = average_batch\n self.begins = [0. for i in range(ex_target_size)]\n for i in conf.begin:\n self.begins[i] = 1.\n if conf.only_nil:\n self.begins[0] = 1.\n self.begins = torch.tensor(self.begins, dtype=torch.float32).cuda().view(1,ex_target_size)\n self.insides = [0. 
for i in range(ex_target_size)]\n for i in conf.inside:\n self.insides[i] = 1.\n if conf.only_nil:\n self.insides[0] = 1.\n self.insides = torch.tensor(self.insides, dtype=torch.float32).cuda().view(1,ex_target_size)\n \n def CrossEntropyLoss_nil(self, feats, tags):\n isBNil = (tags == conf.fuzzy2id['B-Nil'])\n isINil = (tags == conf.fuzzy2id['I-Nil'])\n notfuzzy = 1 - (isBNil + isINil)\n todiv = float(len(conf.begin))\n if conf.only_nil:\n todiv += 1\n loss = torch.sum(-feats*self.begins*(isBNil.float().unsqueeze(1))/todiv, dim=1)\n loss = loss+torch.sum(-feats*self.insides*(isINil.float().unsqueeze(1))/todiv, dim=1)\n temp = tags*notfuzzy.long()\n temp = temp.unsqueeze(1)\n\n temp = -torch.gather(feats,1,temp)\n temp = temp.squeeze()\n temp = temp*notfuzzy.float()\n loss = loss+temp\n loss = loss+ torch.log(torch.sum(torch.exp(feats), dim=1))\n return loss\n\n def loss(self, feats, mask, tags):\n \"\"\"\n Args:\n feats: size=(batch_size, seq_len, tag_size)\n mask: size=(batch_size, seq_len)\n tags: size=(batch_size, seq_len)\n \"\"\"\n loss_value = self.loss_function(feats, mask, tags)\n if self.average_batch:\n batch_size = feats.size(0)\n loss_value = loss_value/float(batch_size)\n return loss_value\n \n def fuzzy_loss(self, feats, mask, tags, locations, ratio = False):\n batch_size = tags.size(0)\n seq_len = tags.size(1)\n confirmed = np.zeros((batch_size,seq_len))\n for l in range(len(locations)):\n confirmed[l][locations[l]] = 1\n tags2 = tags*(torch.tensor(confirmed,dtype=torch.long).cuda())+torch.tensor(conf.NOT_CONFIRM_IDX*(1-confirmed),dtype=torch.long).cuda()\n \n if ratio:\n loss_value = self.loss_function_ratio(feats,mask,tags2,tags)\n else:\n loss_value = self.loss_function(feats,mask,tags2, fuzzy=True)\n if self.average_batch:\n loss_value = loss_value/float(batch_size)\n return loss_value\n\n def fuzzy_loss_nil(self, feats, mask, tags, locations):\n batch_size = tags.size(0)\n seq_len = tags.size(1)\n confirmed = np.zeros((batch_size,seq_len))\n for l in range(len(locations)):\n confirmed[l][locations[l]] = 1\n confirmed = torch.tensor(confirmed,dtype=torch.long).cuda()\n tags_notconf = tags*confirmed+conf.fuzzy2id['not_conf']*(1-confirmed)\n not_nil = (tags >= 0).long()\n\n tags_nofuzzy = tags*not_nil\n loss_value = self.loss_function_nil(feats,mask,tags_notconf,tags_nofuzzy)\n if self.average_batch:\n loss_value = loss_value/float(batch_size)\n return loss_value\n def local_loss(self, feats, tags, locations):\n seq_len = feats.size(1)\n flat_feats = feats.view(-1,feats.size(-1))\n flat_tags = tags.view(-1)\n \n if conf.use_nil:\n losses = self.local_loss_function_nil(flat_feats,flat_tags)\n else:\n losses = self.local_loss_function(flat_feats,flat_tags)\n flat_locations = []\n local_mask = torch.zeros(losses.size()).float().cuda()\n location_count = 0.\n for tempi in range(len(locations)):\n start = tempi * seq_len\n location_count = location_count+len(locations[tempi])\n for loc in locations[tempi]:\n local_mask[start + loc] = 1.\n losses = local_mask * losses\n local_losses = torch.sum(losses)/location_count\n return local_losses\n \n def weighted_local_loss(self,feats,tags,tags_np,locations,weight):\n seq_len = feats.size(1)\n flat_feats = feats.view(-1,feats.size(-1))\n flat_tags = tags.view(-1)\n tags_np = tags_np.reshape(-1)\n losses = self.local_loss_function(flat_feats,flat_tags)\n flat_locations = []\n local_mask = torch.zeros(losses.size()).float().cuda()\n location_count = 0.\n for tempi in range(len(locations)):\n start = tempi * seq_len\n location_count 
=location_count+ len(locations[tempi])\n for loc in locations[tempi]:\n local_mask[start + loc] = weight[tags_np[start+loc]]\n losses = local_mask * losses\n local_losses = torch.sum(losses)/location_count\n return local_losses\n def forward(self, word_input, char_input, mask, length):\n \"\"\"\n Args:\n inputs: list\n \"\"\"\n batch_size = word_input.size(0)\n max_len = word_input.size(1)\n\n # word level feature\n word_feature = self.word_feature_layer(word_input)\n\n # char level feature\n char_feature = self.char_feature_layer(char_input)\n\n try:\n word_feature = torch.cat([word_feature, char_feature], 2)\n except:\n print (word_feature.shape)\n print (char_feature.shape)\n print (word_input.shape)\n print (char_input.shape)\n print (mask.shape)\n print (word_input)\n print (char_input)\n print (mask)\n exit(0)\n word_feature = self.dropout_feature(word_feature)\n # transformer layer\n bilstm_outputs = self.bilstm_layer(word_feature, length)\n lefts = [torch.zeros(bilstm_outputs.size(0),1,bilstm_outputs.size(-1)).cuda()]\n rights = [torch.zeros(bilstm_outputs.size(0),1,bilstm_outputs.size(-1)).cuda()]\n for tempi in range(bilstm_outputs.size(1)-1):\n lefts.append(torch.max(lefts[-1],bilstm_outputs[:,tempi:tempi+1,:]))\n for tempi in range(bilstm_outputs.size(1)-1,0,-1):\n rights.append(torch.max(rights[-1],bilstm_outputs[:,tempi:tempi+1,:]))\n rights.reverse()\n trans_outputs_lr = torch.cat([torch.cat(lefts,dim=1),bilstm_outputs,torch.cat(rights,dim=1)],dim=2)\n trans_outputs_lr = self.dropout_trans(trans_outputs_lr.view(-1, trans_outputs_lr.size(-1)))\n trans_feats = self.hidden2tag(trans_outputs_lr)\n return trans_feats.view(batch_size, max_len, trans_feats.size(-1))\n\n def predict(self, bilstm_outputs, actual_lens, mask=None):\n batch_size = bilstm_outputs.size(0)\n tags_list = []\n path_score, best_paths = self.crf_layer(bilstm_outputs, mask)\n return best_paths.cpu().data.numpy()\n def local_predict(self, logits):\n return torch.argmax(logits,dim=2).view(-1)\n", "repo_name": "zig-kwin-hu/Low-Resource-Name-Tagging", "sub_path": "code/model/modules/bilstm_model.py", "file_name": "bilstm_model.py", "file_ext": "py", "file_size_in_byte": 9900, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 21, "dataset": "github-code", "pt": "85", "api": [{"api_name": "sys.path.append", "line_number": 9, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 9, "usage_type": "attribute"}, {"api_name": "torch.nn.Module", "line_number": 15, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 15, "usage_type": "name"}, {"api_name": "model.modules.feature.WordFeature", "line_number": 50, "usage_type": "call"}, {"api_name": "model.modules.feature.CharFeature", "line_number": 53, "usage_type": "call"}, {"api_name": "torch.nn.Dropout", "line_number": 57, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 57, "usage_type": "name"}, {"api_name": "model.config.conf.dropout_rate", "line_number": 57, "usage_type": "attribute"}, {"api_name": "model.config.conf", "line_number": 57, "usage_type": "name"}, {"api_name": "model.modules.bilstm.Bilstm", "line_number": 60, "usage_type": "call"}, {"api_name": "model.config.conf.trans_output_size", "line_number": 60, "usage_type": "attribute"}, {"api_name": "model.config.conf", "line_number": 60, "usage_type": "name"}, {"api_name": "torch.nn.Dropout", "line_number": 63, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 63, "usage_type": "name"}, {"api_name": "model.config.conf.dropout_rate", 
"line_number": 63, "usage_type": "attribute"}, {"api_name": "model.config.conf", "line_number": 63, "usage_type": "name"}, {"api_name": "model.modules.crf.CRF", "line_number": 66, "usage_type": "call"}, {"api_name": "model.config.conf.trans_output_size", "line_number": 69, "usage_type": "attribute"}, {"api_name": "model.config.conf", "line_number": 69, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 71, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 71, "usage_type": "name"}, {"api_name": "torch.nn.CrossEntropyLoss", "line_number": 75, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 75, "usage_type": "name"}, {"api_name": "model.config.conf.begin", "line_number": 81, "usage_type": "attribute"}, {"api_name": "model.config.conf", "line_number": 81, "usage_type": "name"}, {"api_name": "model.config.conf.only_nil", "line_number": 83, "usage_type": "attribute"}, {"api_name": "model.config.conf", "line_number": 83, "usage_type": "name"}, {"api_name": "torch.tensor", "line_number": 85, "usage_type": "call"}, {"api_name": "torch.float32", "line_number": 85, "usage_type": "attribute"}, {"api_name": "model.config.conf.inside", "line_number": 87, "usage_type": "attribute"}, {"api_name": "model.config.conf", "line_number": 87, "usage_type": "name"}, {"api_name": "model.config.conf.only_nil", "line_number": 89, "usage_type": "attribute"}, {"api_name": "model.config.conf", "line_number": 89, "usage_type": "name"}, {"api_name": "torch.tensor", "line_number": 91, "usage_type": "call"}, {"api_name": "torch.float32", "line_number": 91, "usage_type": "attribute"}, {"api_name": "model.config.conf.fuzzy2id", "line_number": 94, "usage_type": "attribute"}, {"api_name": "model.config.conf", "line_number": 94, "usage_type": "name"}, {"api_name": "model.config.conf.fuzzy2id", "line_number": 95, "usage_type": "attribute"}, {"api_name": "model.config.conf", "line_number": 95, "usage_type": "name"}, {"api_name": "model.config.conf.begin", "line_number": 97, "usage_type": "attribute"}, {"api_name": "model.config.conf", "line_number": 97, "usage_type": "name"}, {"api_name": "model.config.conf.only_nil", "line_number": 98, "usage_type": "attribute"}, {"api_name": "model.config.conf", "line_number": 98, "usage_type": "name"}, {"api_name": "torch.sum", "line_number": 100, "usage_type": "call"}, {"api_name": "torch.sum", "line_number": 101, "usage_type": "call"}, {"api_name": "torch.gather", "line_number": 105, "usage_type": "call"}, {"api_name": "torch.log", "line_number": 109, "usage_type": "call"}, {"api_name": "torch.sum", "line_number": 109, "usage_type": "call"}, {"api_name": "torch.exp", "line_number": 109, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 128, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 131, "usage_type": "call"}, {"api_name": "torch.long", "line_number": 131, "usage_type": "attribute"}, {"api_name": "model.config.conf.NOT_CONFIRM_IDX", "line_number": 131, "usage_type": "attribute"}, {"api_name": "model.config.conf", "line_number": 131, "usage_type": "name"}, {"api_name": "numpy.zeros", "line_number": 144, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 147, "usage_type": "call"}, {"api_name": "torch.long", "line_number": 147, "usage_type": "attribute"}, {"api_name": "model.config.conf.fuzzy2id", "line_number": 148, "usage_type": "attribute"}, {"api_name": "model.config.conf", "line_number": 148, "usage_type": "name"}, {"api_name": "model.config.conf.use_nil", "line_number": 161, 
"usage_type": "attribute"}, {"api_name": "model.config.conf", "line_number": 161, "usage_type": "name"}, {"api_name": "torch.zeros", "line_number": 166, "usage_type": "call"}, {"api_name": "torch.sum", "line_number": 174, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 184, "usage_type": "call"}, {"api_name": "torch.sum", "line_number": 192, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 209, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 223, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 224, "usage_type": "call"}, {"api_name": "torch.max", "line_number": 226, "usage_type": "call"}, {"api_name": "torch.max", "line_number": 228, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 230, "usage_type": "call"}, {"api_name": "torch.argmax", "line_number": 241, "usage_type": "call"}]}
+{"seq_id": "7899967410", "text": "\nfrom modeling.surface import Surface\nfrom modeling.spectrum import Spectrum\nfrom modeling.retracking import Brown\nfrom modeling import rc\n\nimport numpy as np\nimport pandas as pd\nimport os\nimport matplotlib.pyplot as plt\n\nbrown = Brown()\nsurf = Surface()\n \n\ndf = pd.read_excel(\"data/ModelMomentsCWM.xlsx\", header=[0,1,2], index_col=[0])\ndf0 = pd.read_excel(\"data/check.xlsx\", header=[0,1,2], index_col=[0])\n\nu = df[\"U\"]\nprint(u)\ndf = df[\"model\"][\"Ku\"]\ndf0 = df0[\"ryabkova\"][\"Ku\"]\n\n\ntheta = np.deg2rad(np.linspace(-17, 17, 100))\n\n\ncsu = np.zeros(u.size)\ncsu0 = np.zeros(u.size)\nfor i in range(u.size):\n moments = np.zeros(4)\n moments[:-1] = df.iloc[i][:-1]\n csu[i] = 10*np.log10(surf.crossSection(0, moments))\n moments[:-1] = df0.iloc[i][:-1]\n csu0[i] = 10*np.log10(surf.crossSection(0, moments))\n\ndfcs = pd.DataFrame({\"U\": u.values.flatten(), \"default\": csu0.flatten(), \"cwm\": csu.flatten()})\ndfcs.to_csv(\"crosssec_wind.tsv\", sep=\"\\t\", index=False)\n\n\nmoments = np.zeros(4)\nmoments[:-1] = df.iloc[7][:-1]\ncs = 10*np.log10(surf.crossSection(theta, moments))\nmoments[:-1] = df0.iloc[7][:-1]\ncs0 = 10*np.log10(surf.crossSection(theta, moments))\ndfcs = pd.DataFrame({\"theta\": np.rad2deg(theta), \"default\": cs0, \"cwm\": cs})\ndfcs.to_csv(\"crosssec10.tsv\", sep=\"\\t\", index=False)\n\nt = brown.t()\nP0 = brown.pulse(t)\nP0 *= cs0.max()/P0.max()\nP1 = brown.pulse(t, cwm=True)\nP1 *= cs.max()/P1.max()\n\nplt.plot(t, P0)\nplt.plot(t, P1)\nd = pd.DataFrame({\"t\": t, \"linear\": P0, \"cwm\": P1})\nd.to_csv(\"impulse_cwm10.tsv\", sep=\"\\t\", index=False)\n\n\n\nmoments[:-1] = df.iloc[0][:-1]\ncs = 10*np.log10(surf.crossSection(theta, moments))\nmoments[:-1] = df0.iloc[0][:-1]\ncs0 = 10*np.log10(surf.crossSection(theta, moments))\n\ndfcs = pd.DataFrame({\"theta\": np.rad2deg(theta), \"default\": cs0, \"cwm\": cs})\ndfcs.to_csv(\"crosssec3.tsv\", sep=\"\\t\", index=False)\n\nt = brown.t()\nP0 = brown.pulse(t)\nP0 *= cs0.max()/P0.max()\nP1 = brown.pulse(t, cwm=True)\nP1 *= cs.max()/P1.max()\n\nplt.plot(t, P0)\nplt.plot(t, P1)\nd = pd.DataFrame({\"t\": t, \"linear\": P0, \"cwm\": P1})\nd.to_csv(\"impulse_cwm3.tsv\", sep=\"\\t\", index=False)\n\n\n\n", "repo_name": "kannab98/modeling", "sub_path": "crosssec-pulse.py", "file_name": "crosssec-pulse.py", "file_ext": "py", "file_size_in_byte": 2100, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "85", "api": [{"api_name": "modeling.retracking.Brown", "line_number": 12, "usage_type": "call"}, {"api_name": "modeling.surface.Surface", "line_number": 13, "usage_type": "call"}, {"api_name": "pandas.read_excel", "line_number": 16, "usage_type": "call"}, {"api_name": "pandas.read_excel", "line_number": 17, "usage_type": "call"}, {"api_name": "numpy.deg2rad", "line_number": 25, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 25, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 28, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 29, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 31, "usage_type": "call"}, {"api_name": "numpy.log10", "line_number": 33, "usage_type": "call"}, {"api_name": "numpy.log10", "line_number": 35, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 37, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 41, "usage_type": "call"}, {"api_name": "numpy.log10", "line_number": 43, "usage_type": 
"call"}, {"api_name": "numpy.log10", "line_number": 45, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 46, "usage_type": "call"}, {"api_name": "numpy.rad2deg", "line_number": 46, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 55, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 55, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 56, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 56, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 57, "usage_type": "call"}, {"api_name": "numpy.log10", "line_number": 63, "usage_type": "call"}, {"api_name": "numpy.log10", "line_number": 65, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 67, "usage_type": "call"}, {"api_name": "numpy.rad2deg", "line_number": 67, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 76, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 76, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 77, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 77, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 78, "usage_type": "call"}]}
+{"seq_id": "36964378615", "text": "\n\"\"\"\n@author: Andrew\n\"\"\"\n\n\nimport pandas as pd\nfrom urllib.request import urlopen\nfrom bs4 import BeautifulSoup\n\nurl = 'https://www.indeed.co.uk/data-scientist-jobs-in-London,-Greater-London'\n#url = 'https://https://www.indeed.co.uk/jobs?q=data+scientist&l=United+Kingdom'\n\nhtml = urlopen(url)\n\nsoup = BeautifulSoup(html, 'lxml')\ntype(soup)\n\n\njobs = []\n\ntitles = soup.find_all('h2', {\"class\" : \"title\"})\n\nlocations = soup.find_all('span', {\"class\" : \"location accessible-contrast-color-location\"})\n\nsalaries = soup.find_all('span', {\"class\" : \"salaryText\"})\n\n\ntitle = [span.get_text() for span in titles]\nsalary = [span.get_text() for span in salaries]\nlocation = [span.get_text() for span in locations]\n\nsalary = pd.Series(salary)\n\ntitle = pd.Series(title)\n\nlocation = pd.Series(location) \njobs = pd.DataFrame({'Title': title, 'Salary': salary, 'Location': location })\n\njobs.to_csv('DataFrame', index=False)", "repo_name": "Andrew-Storey1/ds_salary_proj", "sub_path": "soup_scraper.py", "file_name": "soup_scraper.py", "file_ext": "py", "file_size_in_byte": 908, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "73", "api": [{"api_name": "urllib.request.urlopen", "line_number": 14, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 16, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 33, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 35, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 37, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 38, "usage_type": "call"}]}
+{"seq_id": "29624837350", "text": "# Converts .jpg images to .png\n# Takes input and output folders as arguments\n\n# Import libraries\nimport sys\nimport os\nfrom PIL import Image\n\n# Error handling\ntry:\n # Get source folder and output folder\n location = str(sys.argv[1])\n new_folder = str(sys.argv[2])\nexcept IndexError:\n print(\"ERROR: Please enter input and output folders.\")\nelse:\n # Create output folder if it doesn't exist\n if not os.path.exists(new_folder):\n os.makedirs(new_folder)\n\n # Convert .jpg files in source folder to .png in output folder\n for file in os.listdir(location):\n if '.jpg' in file:\n img = Image.open(location + file)\n name = os.path.splitext(file)[0]\n img.save(new_folder + name + '.png', 'png')\n print(\"All done!\")\n", "repo_name": "nwferreri/JPG-to-PNG-converter", "sub_path": "JPGtoPNGconverter.py", "file_name": "JPGtoPNGconverter.py", "file_ext": "py", "file_size_in_byte": 776, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "73", "api": [{"api_name": "sys.argv", "line_number": 12, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 13, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 18, "usage_type": "call"}, {"api_name": "os.path", "line_number": 18, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 19, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 22, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 24, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 24, "usage_type": "name"}, {"api_name": "os.path.splitext", "line_number": 25, "usage_type": "call"}, {"api_name": "os.path", "line_number": 25, "usage_type": "attribute"}]}
+{"seq_id": "17870716030", "text": "from PyQt5 import QtWidgets, QtCore, QtGui\nimport pyqtgraph as pg\nimport sys\nimport traceback\nimport csv, os, time, math\n\n\nclass MonitoringData(QtWidgets.QMainWindow):\n def __init__(self, pkg, device_name):\n super().__init__()\n self.pkg = pkg # 包名\n self.device_name = device_name # 设备名\n self.timer_interval(2000) # 数据刷新时间间隔\n self.data_list = []\n self.cpu_data = []\n self.memory_data = []\n self.fps_data = []\n # self.cpucsvfile = open('./CPU_' + time.strftime(\"%Y_%m_%d_%H_%M_%S\") + '.csv', 'w', encoding='utf8', newline='')\n # self.save_data('cpu', [('timestamp', 'CPU(%)')]) # 定义cpu数据列表title\n # self.memcsvfile = open('./Memory_' + time.strftime(\"%Y_%m_%d_%H_%M_%S\") + '.csv', 'w', encoding='utf8', newline='')\n # self.save_data('mem', [('timestamp', 'Memory(MB)')]) # 定义Memory数据列表title\n # self.fpscsvfile = open('./FPS_' + time.strftime(\"%Y_%m_%d_%H_%M_%S\") + '.csv', 'w', encoding='utf8', newline='')\n # self.save_data('fps', [('timestamp', 'FPS')]) # 定义FPS数据列表title\n # 创建监控窗口\n self.setWindowTitle(\"App性能数据显示\")\n self.App_monitoring_data = QtWidgets.QWidget() # 创建一个主部件\n self.setCentralWidget(self.App_monitoring_data) # 设置窗口默认部件\n self.resize(1200, 900) # 设置窗口大小\n # 创建cpu监控图像\n self.cpu_image = QtWidgets.QGridLayout() # 创建cpu网格布局\n self.App_monitoring_data.setLayout(self.cpu_image) # 设置cpu的主部件为网格\n self.cpu_plot_widget = QtWidgets.QWidget() # cpu的widget部件作为K线图部件\n self.plot_layout = QtWidgets.QGridLayout() # cpu的网格布局层\n self.cpu_plot_widget.setLayout(self.plot_layout) # 设置K线图部件的布局层\n self.cpu_plot_plt = pg.PlotWidget(title='CPU', left='CPU(%)') # cpu的绘图部件\n self.cpu_plot_plt.showGrid(x=True, y=True) # 显示cpu图形\n self.plot_layout.addWidget(self.cpu_plot_plt) # 添加绘图部件到K线图部件的网格布局层\n self.cpu_image.addWidget(self.cpu_plot_widget, 1, 0, 3, 3) # 将上述部件添加到布局层中\n self.cpu_plot_plt.setYRange(max=120, min=0) # 设置cpu的纵坐标范围\n # 创建Memory监控图像\n self.mem_image = QtWidgets.QGridLayout() # 创建memory网格布局\n self.App_monitoring_data.setLayout(self.mem_image) # 设置memory主部件的布局为网格\n self.mem_plot_widget = QtWidgets.QWidget() # memory的widget部件作为K线图部件\n self.mem_plot_layout = QtWidgets.QGridLayout() # memory的网格布局层\n self.mem_plot_widget.setLayout(self.mem_plot_layout) # 设置K线图部件的布局层\n self.mem_plot_plt = pg.PlotWidget(title='Memory', left='Pss Total(MB)') # memory绘图部件\n self.mem_plot_plt.showGrid(x=True, y=True) # 显示memory图形\n self.plot_layout.addWidget(self.mem_plot_plt) # 添加绘图部件到K线图部件的网格布局层\n self.mem_image.addWidget(self.mem_plot_widget, 1, 0, 3, 3) # 将上述部件添加到布局层中\n self.mem_plot_plt.setYRange(max=600, min=0) # 设置memory的纵坐标范围\n # 创建FPS监控图像\n self.fps_image = QtWidgets.QGridLayout() # 创建fps网格布局\n self.App_monitoring_data.setLayout(self.fps_image) # 设置fps主部件的布局为网格\n self.fps_plot_widget = QtWidgets.QWidget() # fps的widget部件作为K线图部件\n self.fps_plot_layout = QtWidgets.QGridLayout() # fps的网格布局层\n self.fps_plot_widget.setLayout(self.fps_plot_layout) # 设置K线图部件的布局层\n self.fps_plot_plt = pg.PlotWidget(title='FPS', left='FPS') # fps绘图部件\n self.fps_plot_plt.showGrid(x=True, y=True) # 显示fps图形网格\n self.plot_layout.addWidget(self.fps_plot_plt) # 添加绘图部件到K线图部件的网格布局层\n self.fps_image.addWidget(self.fps_plot_widget, 1, 0, 3, 3) # 将上述部件添加到布局层中\n self.fps_plot_plt.setYRange(max=70, min=0) # 设置fps的纵坐标范围\n\n def timer_interval(self, timeinterval):\n \"\"\"启动定时器 时间间隔秒\"\"\"\n self.timer = QtCore.QTimer(self)\n self.timer.timeout.connect(self.get_cpu_info)\n self.timer.timeout.connect(self.get_memory_info)\n self.timer.timeout.connect(self.get_fps_info)\n 
self.timer.start(timeinterval)\n\n def get_current_time(self):\n \"\"\"获取当前时间\"\"\"\n currenttime = time.strftime(\"%H:%M:%S\", time.localtime())\n return currenttime\n\n def get_cpu_info(self):\n \"\"\"获取cpu数据\"\"\"\n try:\n result = os.popen(\"adb -s {} shell dumpsys cpuinfo | findstr {}\".format(self.device_name, self.pkg))\n print('-------')\n print(result)\n # result = os.popen(\"adb -s {} shell top -m 100 -n 1 -d 1 | findstr {}\".format(self.device_name, self.pkg)) # 执行adb命令\n res = result.readline().split(\" \") # 将获取的行数据使用空格进行分割\n print(res)\n if res == ['']: # 处理没有数据的情况\n print('no data')\n pass\n else:\n cpuvalue1 = list(filter(None, res))[2] # 获取cpu\n cpuvalue = cpuvalue1.strip('%') # 去除%号\n current_time = self.get_current_time()\n if cpuvalue == 'R': # 过滤cpu等于R\n pass\n else:\n cpu = float(cpuvalue)\n print(\"CPU:\", cpu)\n # self.save_data('cpu', [(current_time, cpuvalue)]) # 将数据保存到Excel\n self.data_list.append(cpu) # 将数据写入列表\n self.cpu_plot_plt.plot().setData(self.data_list, pen='g') # 将数据载入图像中\n except Exception as e:\n print(traceback.print_exc())\n\n def get_memory_info(self):\n \"\"\"获取Memory数据\"\"\"\n try:\n result = os.popen(\"adb -s {} shell dumpsys meminfo {}\".format(self.device_name, self.pkg)) # 执行adb命令\n res = result.readlines()\n for line in res:\n if \"TOTAL:\" in line: # 不同手机adb shell dumpsys meminfo packagename 获取的Pss Total 不同,有的手机是TOTAL:,有的是TOTAL PSS:,这里做了一下兼容\n print(line)\n pss_total1 = line.split(\" \")[18] # 将获取的行数据使用空格进行分割并取出第 18个元素\n elif 'TOTAL PSS:' in line:\n print(line)\n pss_total1 = line.split(\" \")[15] # 将获取的行数据使用空格进行分割并取出第 15个元素\n else:\n continue\n pss_total = round(float(pss_total1) / 1024, 2) # 单位换算成MB,保留2位小数\n current_time = self.get_current_time()\n print(\"Memory:\", pss_total)\n # self.save_data('mem', [(current_time, pss_total)]) # 将数据保存到Excel\n self.memory_data.append(pss_total) # 将数据加入列表\n self.mem_plot_plt.plot().setData(self.memory_data, pen='y') # 将数据载入图像中\n except Exception as e:\n print(traceback.print_exc())\n\n def get_fps_info(self):\n \"\"\"获取fps数据\"\"\"\n try:\n result = os.popen(\"adb -s {} shell dumpsys gfxinfo {}\".format(self.device_name, self.pkg)) # 执行adb命令\n res = result.readlines() # 获取所有行数据\n frame_count = 0 # 定义frame_count初始值\n vsync_overtime_s = [] # 定义vsync_overtime_s列表\n jank_num = 0 # 定义jank_num初始值\n for line in res: # 循环行\n if '\\t' in line: # 取出带\\t的所有行\n if '\\tcom.kmxs.reader' in line: # 过滤\\tcom.kmxs.reader数据\n r = False\n elif '\\tDraw' in line: # 过滤\\tDraw数据\n r = False\n elif '/android.view' in line:\n r = False\n else:\n frame_count = frame_count + 1 # 循环次数\n fps = line.split('\\t') # 分离数据\n # print(fps)\n Draw = float(fps[1]) # 取数据\n Prepare = float(fps[2]) # 取数据\n Process = float(fps[3]) # 取数据\n Execute = float(fps[4].replace('\\n', '')) # 取数据\n render_time = Draw + Prepare + Process + Execute # 计算render_time\n # print(render_time)\n # print('Native Heap is ', Native_Heap_mem)\n if render_time > 16.67: # 大于16.67认为是一次卡顿\n jank_num += 1 # 计算卡顿次数\n vsync_overtime = math.ceil(render_time / 16.67) - 1 # 向上取整\n vsync_overtime_s.append(vsync_overtime) # 添加到列表\n else:\n continue\n\n vsync_overtime_sum = sum(vsync_overtime_s) # 计算列表中所有数据的和\n fps_sum = frame_count + vsync_overtime_sum\n if fps_sum == 0:\n fps = 0\n print(\"手机屏幕静止\")\n else:\n fps = round(frame_count * 60 / fps_sum, 2) # 计算fps,并保留2位小数\n current_time = self.get_current_time()\n # self.save_data('fps', [(current_time, fps)]) # 将数据保存到Excel\n print(\"FPS:\", fps)\n self.fps_data.append(fps) # 将数据加入列表\n 
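# fps = 60 * frames / (frames + extra vsync periods): every render over 16.67 ms counts as jank and adds the vsync windows it overran\n            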
self.fps_plot_plt.plot().setData(self.fps_data, pen='m') # 将数据载入图像中\n except Exception as e:\n print(traceback.print_exc())\n\n def save_data(self, data_type, cpudata):\n pass\n # \"\"\"保存数据到Excel\"\"\"\n # if data_type == 'cpu':\n # writer = csv.writer(self.cpucsvfile) # 写入Excel\n # writer.writerows(cpudata) # 将数据写入Excel\n # elif data_type == 'mem':\n # writer = csv.writer(self.memcsvfile)\n # writer.writerows(cpudata)\n # elif data_type == 'fps':\n # writer = csv.writer(self.fpscsvfile)\n # writer.writerows(cpudata)\n # else:\n # print('data_type error!')\n\n\nif __name__ == '__main__':\n app = QtWidgets.QApplication(sys.argv)\n # data = MonitoringData('com.kmxs.reader', '154030353600A5G') # 请修改包名和设备号\n data = MonitoringData('com.eg.android.AlipayGphone', '154030353600A5G') # 请修改包名和设备号\n data.show()\n sys.exit(app.exec_())\n", "repo_name": "gaoaolei/yufa", "sub_path": "曲线绘制/性能曲线.py", "file_name": "性能曲线.py", "file_ext": "py", "file_size_in_byte": 11038, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "85", "api": [{"api_name": "PyQt5.QtWidgets.QMainWindow", "line_number": 8, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets", "line_number": 8, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QWidget", "line_number": 26, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 26, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QGridLayout", "line_number": 30, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 30, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QWidget", "line_number": 32, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 32, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QGridLayout", "line_number": 33, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 33, "usage_type": "name"}, {"api_name": "pyqtgraph.PlotWidget", "line_number": 35, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QGridLayout", "line_number": 41, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 41, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QWidget", "line_number": 43, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 43, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QGridLayout", "line_number": 44, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 44, "usage_type": "name"}, {"api_name": "pyqtgraph.PlotWidget", "line_number": 46, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QGridLayout", "line_number": 52, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 52, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QWidget", "line_number": 54, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 54, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QGridLayout", "line_number": 55, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 55, "usage_type": "name"}, {"api_name": "pyqtgraph.PlotWidget", "line_number": 57, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.QTimer", "line_number": 65, "usage_type": "call"}, {"api_name": "PyQt5.QtCore", "line_number": 65, "usage_type": "name"}, {"api_name": "time.strftime", "line_number": 73, "usage_type": "call"}, {"api_name": "time.localtime", "line_number": 73, "usage_type": "call"}, {"api_name": "os.popen", "line_number": 79, "usage_type": "call"}, {"api_name": "traceback.print_exc", "line_number": 101, "usage_type": "call"}, {"api_name": "os.popen", "line_number": 
106, "usage_type": "call"}, {"api_name": "traceback.print_exc", "line_number": 124, "usage_type": "call"}, {"api_name": "os.popen", "line_number": 129, "usage_type": "call"}, {"api_name": "math.ceil", "line_number": 155, "usage_type": "call"}, {"api_name": "traceback.print_exc", "line_number": 173, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QApplication", "line_number": 192, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 192, "usage_type": "name"}, {"api_name": "sys.argv", "line_number": 192, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 196, "usage_type": "call"}]}
+{"seq_id": "31106591729", "text": "#\n# @lc app=leetcode.cn id=2125 lang=python3\n#\n# [2125] 银行中的激光束数量\n#\n\n# @lc code=start\nfrom typing import List\n\n\nclass Solution:\n def numberOfBeams(self, bank: List[str]) -> int:\n validRows = list(filter(lambda i:i>0, [self.sum(s) for s in bank]))\n rowCount = len(validRows)\n if rowCount < 2:\n return 0\n return sum([validRows[i] * validRows[i+1] for i in range(rowCount - 1)])\n\n def sum(self, s:str):\n return sum(int(i) for i in s)\n# @lc code=end\n\nif __name__ ==\"__main__\":\n s = Solution()\n print(s.numberOfBeams([\"011001\",\"000000\",\"010100\",\"001000\"]))\n\n", "repo_name": "ChinYoung/leetcode-solution", "sub_path": "2125.银行中的激光束数量.py", "file_name": "2125.银行中的激光束数量.py", "file_ext": "py", "file_size_in_byte": 629, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "73", "api": [{"api_name": "typing.List", "line_number": 12, "usage_type": "name"}]}
+{"seq_id": "5556202234", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jun 6 17:01:04 2019\n\nPython script versions of ipython notebooks \nlocated in ProteinPatchAnalysis/AutoEncoderNotebooks.\nReads in mnist.pickle that contains mnist dataset\n(Cannot import MNIST directly on CCNI). \nNote that the mnist.pickle file is not included in \ngithub repository because it is too large.\n\n@author: camil\n\"\"\"\n\nimport keras\nimport pickle \nimport numpy as np\n\nfrom keras import layers\nfrom keras.layers import Input, Dense, Conv2D, MaxPooling2D, UpSampling2D, Lambda\nfrom keras.models import Model\nfrom keras import regularizers\nfrom keras import backend as K\n\ninput_img = Input(shape=(28, 28, 1)) # adapt this if using `channels_first` image data format\n\ndata = open(\"mnist.pickle\",\"rb\")\n[x_train,x_test] = pickle.load(data)\n\n\n\nx_train = x_train.reshape((60000, 28, 28) + (1,))\nx_test = x_test.reshape((10000, 28, 28) + (1,))\n\nbatch_size = 128\nlatent_dim = 2\n\n\nx = layers.Conv2D(32, 3,padding='same', activation='relu')(input_img)\nx = layers.Conv2D(64, 3,padding='same', activation='relu',strides=(2, 2))(x)\nx = layers.Conv2D(64, 3,padding='same', activation='relu')(x)\nx = layers.Conv2D(64, 3,padding='same', activation='relu')(x)\nshape_before_flattening = K.int_shape(x)\nx = layers.Flatten()(x)\nx = layers.Dense(32, activation='relu')(x)\n\nencoder = Model(input_img, x)\nx_encoded = encoder(input_img)\n\nz_mean = layers.Dense(latent_dim)(x_encoded)\nz_log_var = layers.Dense(latent_dim)(x)\n\n\ndef sampling(args):\n z_mean, z_log_var = args\n epsilon = K.random_normal(shape=(K.shape(z_mean)[0], latent_dim),mean=0., stddev=1.)\n return z_mean + K.exp(z_log_var) * epsilon\n\nz = layers.Lambda(sampling)([z_mean, z_log_var])\n\ndecoder_input = layers.Input(K.int_shape(z)[1:])\nx = layers.Dense(np.prod(shape_before_flattening[1:]),activation='relu')(decoder_input)\nx = layers.Reshape(shape_before_flattening[1:])(x)\nx = layers.Conv2DTranspose(32, 3,padding='same',activation='relu',strides=(2, 2))(x)\nx = layers.Conv2D(1, 3,padding='same',activation='sigmoid')(x)\n\ndecoder = Model(decoder_input, x)\nz_decoded = decoder(z)\n\n\nclass TimeHistory(keras.callbacks.Callback):\n def on_train_begin(self, logs={}):\n self.times = []\n\n def on_epoch_begin(self, batch, logs={}):\n self.epoch_time_start = time.time()\n\n def on_epoch_end(self, batch, logs={}):\n self.times.append(time.time() - self.epoch_time_start)\n\n\nclass CustomVariationalLayer(keras.layers.Layer):\n \n def vae_loss(self, x, z_decoded):\n x = K.flatten(x)\n z_decoded = K.flatten(z_decoded)\n xent_loss = keras.metrics.binary_crossentropy(x, z_decoded)\n kl_loss = -5e-4 * K.mean(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1)\n return K.mean(xent_loss+kl_loss) #xent_loss) # + kl_loss)\n \n def call(self, inputs):\n x = inputs[0]\n z_decoded = inputs[1]\n loss = self.vae_loss(x, z_decoded)\n self.add_loss(loss, inputs=inputs)\n return x\n \ny = CustomVariationalLayer()([input_img, z_decoded])\n\nvae = Model(input_img, y)\nvae.compile(optimizer='rmsprop', loss=None)\n\n\ntime_callback = TimeHistory()\n\nhistory = vae.fit(x=x_train, y=None,shuffle=True,epochs=50,batch_size=batch_size,validation_data=(x_test, None),callbacks=[time_callback])\n\n\nhistory_dict = history.history\n\nloss_values = history_dict['loss']\nval_loss_values = history_dict['val_loss']\n\n\npickle_out = open(\"Histories/losses-xxx.pickle\",\"wb\")\npickle.dump([loss_values,val_loss_values], 
pickle_out)\npickle_out.close()\n\ntimes = time_callback.times\npickle_out = open(\"Histories/times-xxx.pickle\",\"wb\")\npickle.dump(times, pickle_out)  # write the per-epoch durations recorded by TimeHistory\npickle_out.close()\n\n\n\n\n# Save the weights\nvae.save_weights('Histories/model_weights-xxx.h5')\n\n# Save the model architecture\nwith open('Histories/model_architecture.json', 'w') as f:\n    f.write(vae.to_json())\n\n\n\n\n", "repo_name": "garde-group/ProteinPatchAnalysis", "sub_path": "DCS_GPU_PracticeRun/MNIST_Autoencoder_Conv_Variational-Script.py", "file_name": "MNIST_Autoencoder_Conv_Variational-Script.py", "file_ext": "py", "file_size_in_byte": 3861, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "73", "api": [{"api_name": "keras.layers.Input", "line_number": 25, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 28, "usage_type": "call"}, {"api_name": "keras.layers.Conv2D", "line_number": 39, "usage_type": "call"}, {"api_name": "keras.layers", "line_number": 39, "usage_type": "name"}, {"api_name": "keras.layers.Conv2D", "line_number": 40, "usage_type": "call"}, {"api_name": "keras.layers", "line_number": 40, "usage_type": "name"}, {"api_name": "keras.layers.Conv2D", "line_number": 41, "usage_type": "call"}, {"api_name": "keras.layers", "line_number": 41, "usage_type": "name"}, {"api_name": "keras.layers.Conv2D", "line_number": 42, "usage_type": "call"}, {"api_name": "keras.layers", "line_number": 42, "usage_type": "name"}, {"api_name": "keras.backend.int_shape", "line_number": 43, "usage_type": "call"}, {"api_name": "keras.backend", "line_number": 43, "usage_type": "name"}, {"api_name": "keras.layers.Flatten", "line_number": 44, "usage_type": "call"}, {"api_name": "keras.layers", "line_number": 44, "usage_type": "name"}, {"api_name": "keras.layers.Dense", "line_number": 45, "usage_type": "call"}, {"api_name": "keras.layers", "line_number": 45, "usage_type": "name"}, {"api_name": "keras.models.Model", "line_number": 47, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 50, "usage_type": "call"}, {"api_name": "keras.layers", "line_number": 50, "usage_type": "name"}, {"api_name": "keras.layers.Dense", "line_number": 51, "usage_type": "call"}, {"api_name": "keras.layers", "line_number": 51, "usage_type": "name"}, {"api_name": "keras.backend.random_normal", "line_number": 56, "usage_type": "call"}, {"api_name": "keras.backend", "line_number": 56, "usage_type": "name"}, {"api_name": "keras.backend.shape", "line_number": 56, "usage_type": "call"}, {"api_name": "keras.backend.exp", "line_number": 57, "usage_type": "call"}, {"api_name": "keras.backend", "line_number": 57, "usage_type": "name"}, {"api_name": "keras.layers.Lambda", "line_number": 59, "usage_type": "call"}, {"api_name": "keras.layers", "line_number": 59, "usage_type": "name"}, {"api_name": "keras.layers.Input", "line_number": 61, "usage_type": "call"}, {"api_name": "keras.layers", "line_number": 61, "usage_type": "name"}, {"api_name": "keras.backend.int_shape", "line_number": 61, "usage_type": "call"}, {"api_name": "keras.backend", "line_number": 61, "usage_type": "name"}, {"api_name": "keras.layers.Dense", "line_number": 62, "usage_type": "call"}, {"api_name": "keras.layers", "line_number": 62, "usage_type": "name"}, {"api_name": "numpy.prod", "line_number": 62, "usage_type": "call"}, {"api_name": "keras.layers.Reshape", "line_number": 63, "usage_type": "call"}, {"api_name": "keras.layers", "line_number": 63, "usage_type": "name"}, {"api_name": 
"keras.layers.Conv2DTranspose", "line_number": 64, "usage_type": "call"}, {"api_name": "keras.layers", "line_number": 64, "usage_type": "name"}, {"api_name": "keras.layers.Conv2D", "line_number": 65, "usage_type": "call"}, {"api_name": "keras.layers", "line_number": 65, "usage_type": "name"}, {"api_name": "keras.models.Model", "line_number": 67, "usage_type": "call"}, {"api_name": "keras.callbacks", "line_number": 71, "usage_type": "attribute"}, {"api_name": "keras.layers", "line_number": 82, "usage_type": "attribute"}, {"api_name": "keras.backend.flatten", "line_number": 85, "usage_type": "call"}, {"api_name": "keras.backend", "line_number": 85, "usage_type": "name"}, {"api_name": "keras.backend.flatten", "line_number": 86, "usage_type": "call"}, {"api_name": "keras.backend", "line_number": 86, "usage_type": "name"}, {"api_name": "keras.metrics.binary_crossentropy", "line_number": 87, "usage_type": "call"}, {"api_name": "keras.metrics", "line_number": 87, "usage_type": "attribute"}, {"api_name": "keras.backend.mean", "line_number": 88, "usage_type": "call"}, {"api_name": "keras.backend", "line_number": 88, "usage_type": "name"}, {"api_name": "keras.backend.square", "line_number": 88, "usage_type": "call"}, {"api_name": "keras.backend.exp", "line_number": 88, "usage_type": "call"}, {"api_name": "keras.backend.mean", "line_number": 89, "usage_type": "call"}, {"api_name": "keras.backend", "line_number": 89, "usage_type": "name"}, {"api_name": "keras.models.Model", "line_number": 100, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 116, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 121, "usage_type": "call"}]}
+{"seq_id": "6565877666", "text": "import os\nimport allure\nimport pytest\nfrom api.advice_predicting import Predicting\nfrom common.get_log import log\nfrom conftest import headers_gl\n\n\nclass TestPredicting():\n \"\"\"\n 用户查询测试类\n 1.参数化存放在特定的yml文件中,用三级目录管理用例、参数数据和ids的数据\n 2.critical的用例等级为完整测试,blocker等级为冒烟测试\n 3.每个用例都配合fixture,完成了不同的前置和后置,实现了不同用例互不干扰的状态\n \"\"\"\n\n # 新建公司事件类实例\n event = Predicting()\n\n # 获取参数化的数据\n para_data = event.load_yaml('testcase/advice_predicting/para_advice_predicting.yml')\n\n # 获取不同用例需要的参数化数据以及ids标题数据\n # add_data = para_data[\"add\"][\"data\"]\n # add_ids = para_data[\"add\"][\"ids\"]\n\n get_data = para_data[\"get\"][\"data\"]\n get_ids = para_data[\"get\"][\"ids\"]\n\n #\n # delete_data = para_data[\"delete\"][\"data\"]\n # delete_ids = para_data[\"delete\"][\"ids\"]\n #\n # edit_data = para_data[\"edit\"][\"data\"]\n # edit_ids = para_data[\"edit\"][\"ids\"]\n\n @pytest.mark.parametrize(\"Authorization, status, expect_http_code, type, amount, target\", get_data, ids=get_ids)\n @allure.severity(allure.severity_level.CRITICAL)\n def test_advice_predicting(self, env, Authorization, status, expect_http_code, type, amount, target):\n algo_domain = env['host']['algo_users']\n # Authorization = headers_gl['get']['headers']['Authorization']\n log.info(\"-------------开始获取公司事件测试---------\")\n\n res = self.event.get_advice_predicting(algo_domain, Authorization, type, amount, target)\n log.info(\"-------------测试结束---------\")\n\n # ******** http协议状态码判断 ********\n print('******** http协议状态码判断... ')\n print(\"res.status_code: {0}\".format(res.status_code))\n if res.status_code == expect_http_code:\n print('\\n检测点:http status_code 符合预期!')\n assert True\n else:\n print('WARN --> http请求响应status_code={0}'.format(res.status_code))\n assert False\n # ******** 实际测试输出 ********\n print('******** 实际测试输出... 
')\n res_json = res.json()\n if res.status_code == 200:\n real_data = res_json['data']\n error = res_json['errors']\n if real_data: # 数据非空\n # print('\\n检测点:接口返回data非空,符合预期!')\n print('\\n检测点:接口返回data 符合预期!返回数据为:{0}'.format(real_data))\n # 检测返回的数据类型\n # if isinstance(real_data, dict):\n # print('\\n检测点:接口返回data type符合预期!返回数据为:{0}'.format(type(real_data)))\n # else:\n # print('WARN --> data 部分类型与预期不符,预期类型:dict,实际类型:{0}.(real_data={1})'.format(type(real_data),\n # real_data))\n # assert False\n # event_keys = ['clientNumber', 'riskType', 'prefRegion', 'prefSector', 'status',\n # 'riskAckStatus']\n orders_data = res_json['data']['orders']\n # print(orders_data)\n iii = []\n for i in orders_data:\n ii = i['weight']\n # print(ii)\n iii.append(ii)\n weight_total = sum(iii)\n if weight_total == 1:\n print(\"\\n检测点:申购试算,接口返回weight和等于1 符合预期!\")\n assert True\n else:\n print(\"\\n检测点:与预期不符!weight和为:\",weight_total)\n print(\"\\n试算返回的各基金weight:\", iii)\n assert False\n return real_data\n elif error[0]['code'] == 100002:\n assert True\n elif error[0]['code'] == 300008:\n msg = error[0]['message']\n error_code = error[0]['code']\n print(\"\\n检测点:起投金额检测\")\n print(str(error_code),msg)\n # print(real_data)\n assert True\n else:\n assert False\n else:\n pass\n\n\n\nif __name__ == '__main__':\n # a = TestEvents\n # print('Test data: {0}, Test ids: {1}'.format(a.get_data, a.get_ids))\n pytest.main(['-v', 'test_advice_predicting.py'])\n # pytest.main(['-v', '--alluredir', '../report/result', 'test_algo_stock_info.py'])\n # os.system('allure generate ../report/result -o ../report/html --clean')\n", "repo_name": "ouyangrunqi/test", "sub_path": "module/advice_predicting/test_advice_predicting.py", "file_name": "test_advice_predicting.py", "file_ext": "py", "file_size_in_byte": 4852, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "73", "api": [{"api_name": "api.advice_predicting.Predicting", "line_number": 18, "usage_type": "call"}, {"api_name": "common.get_log.log.info", "line_number": 42, "usage_type": "call"}, {"api_name": "common.get_log.log", "line_number": 42, "usage_type": "name"}, {"api_name": "common.get_log.log.info", "line_number": 45, "usage_type": "call"}, {"api_name": "common.get_log.log", "line_number": 45, "usage_type": "name"}, {"api_name": "pytest.mark.parametrize", "line_number": 37, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 37, "usage_type": "attribute"}, {"api_name": "allure.severity", "line_number": 38, "usage_type": "call"}, {"api_name": "allure.severity_level", "line_number": 38, "usage_type": "attribute"}, {"api_name": "pytest.main", "line_number": 109, "usage_type": "call"}]}
+{"seq_id": "24149796965", "text": "'''\r\nCreated on 18 Jul 2013\r\n\r\n@author: toke.jepsen\r\n'''\r\n\r\n\r\nimport maya.cmds as cmds\r\nimport maya.mel as mel \r\n\r\n\r\nfrom sgtk import TankError\r\nimport sgtk\r\n\r\n#getting context- THIS NEED TO USE THE APP CONTEXT INSTEAD!\r\n#projectPath=cmds.workspace(q=True,fullName=True)\r\n#tk = sgtk.sgtk_from_path(projectPath)\r\n#ctx=tk.context_from_path(projectPath)\r\nctx=self._app.context.entity\r\n \r\n#flush namespaces\r\ncmds.namespace(setNamespace=\"::\")\r\ncurrentNameSpaces = cmds.namespaceInfo(listOnlyNamespaces=True)\r\n \r\ndef removeNamespaces(namespace='::'):\r\n \r\n ignoreNamespaces = ['UI', 'shared']\r\n \r\n cmds.namespace(setNamespace=namespace)\r\n childNamespaces = cmds.namespaceInfo(listOnlyNamespaces=True)\r\n if childNamespaces:\r\n \r\n namespaces=list(set(childNamespaces)-set(ignoreNamespaces))\r\n for n in namespaces:\r\n removeNamespaces(':'+n)\r\n else:\r\n parent=':'+cmds.namespaceInfo(parent=True)\r\n cmds.namespace(setNamespace=parent)\r\n \r\n cmds.namespace(moveNamespace=[namespace,parent],force=True)\r\n \r\n cmds.namespace(removeNamespace=namespace)\r\n \r\n if parent!='::':\r\n removeNamespaces(parent)\r\n \r\nremoveNamespaces()\r\n \r\n#deleting history\r\nfor mesh in cmds.ls(type='mesh'):\r\n \r\n if cmds.objExists(mesh):\r\n \r\n #delete history\r\n cmds.delete(mesh,ch=True)\r\n \r\ninvisibleMeshes=[]\r\n#check scene for invisible meshes\r\nfor mesh in cmds.ls(type='mesh'):\r\n \r\n if cmds.objExists(mesh):\r\n \r\n transform=cmds.listRelatives(mesh,parent=True)[0]\r\n \r\n #make visible - need to raise error if some objects are invisible---\r\n if cmds.getAttr(transform+'.v')==0:\r\n invisibleMeshes.append(transform)\r\n \r\n \r\nif invisibleMeshes:\r\n \r\n listString=''\r\n for mesh in invisibleMeshes:\r\n listString+=mesh+','\r\n \r\n #raise sgtk.TankError(\"Unable to perform pre-publish for invisible meshes %s\" % listString)\r\n \r\n#flush any tranforms that arent a mesh\r\nfor transform in cmds.ls(type='transform'):\r\n \r\n if cmds.objExists(transform):\r\n shapes=cmds.listRelatives(transform,shapes=True,fullPath=True)\r\n \r\n #deleting empty transforms\r\n if shapes:\r\n check=False\r\n for shape in shapes:\r\n \r\n #deleting everything but meshes\r\n shapeType=cmds.nodeType(shape)\r\n if shapeType=='mesh':\r\n check=True\r\n if shapeType=='camera':\r\n cams=['front','top','persp','side']\r\n if transform in cams:\r\n check=True\r\n \r\n if not check:\r\n cmds.delete(transform)\r\n else:\r\n cmds.delete(transform)\r\n \r\n#geo group\r\ngeogrp=cmds.group(empty=True,n='geo')\r\n \r\nmeshes=[]\r\n#process meshes\r\nfor mesh in cmds.ls(type='mesh'):\r\n \r\n if cmds.objExists(mesh):\r\n \r\n transform=cmds.listRelatives(mesh,parent=True)[0]\r\n parent=cmds.listRelatives(transform,parent=True)\r\n if parent!=None:\r\n parent=parent[0]\r\n \r\n #make visible - need to raise error if some objects are invisible---\r\n #if cmds.getAttr(transform+'.v')==0:\r\n # raise cmds.error()\r\n \r\n #set pivot to world zero\r\n posGrp=cmds.group(empty=True)\r\n \r\n pivotTranslate = cmds.xform (posGrp, q = True, ws = True, rotatePivot = True)\r\n \r\n cmds.parent(transform, posGrp)\r\n cmds.makeIdentity(transform, a = True, t = True, r = True, s = True)\r\n cmds.xform (transform, ws = True, pivots = pivotTranslate)\r\n \r\n if parent!=None:\r\n cmds.parent(transform,parent)\r\n else:\r\n cmds.parent(transform,w=True)\r\n \r\n cmds.delete(posGrp)\r\n \r\n #deleting any unused nodes\r\n cmd='MLdeleteUnused;'\r\n 
mel.eval(cmd)\r\n \r\n #adding asset tag\r\n if not cmds.objExists(transform+'.asset'):\r\n cmds.addAttr(transform,ln='asset',dt='string')\r\n cmds.setAttr(transform+'.asset',ctx.entity['name'],type='string')\r\n \r\n #add to group\r\n if parent!=geogrp:\r\n cmds.parent(transform,geogrp)", "repo_name": "baitstudio/CodeRepo", "sub_path": "temp/validate_maya_scene_pre_publish.py", "file_name": "validate_maya_scene_pre_publish.py", "file_ext": "py", "file_size_in_byte": 4287, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 10, "dataset": "github-code", "pt": "73", "api": [{"api_name": "maya.cmds.namespace", "line_number": 22, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 22, "usage_type": "name"}, {"api_name": "maya.cmds.namespaceInfo", "line_number": 23, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 23, "usage_type": "name"}, {"api_name": "maya.cmds.namespace", "line_number": 29, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 29, "usage_type": "name"}, {"api_name": "maya.cmds.namespaceInfo", "line_number": 30, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 30, "usage_type": "name"}, {"api_name": "maya.cmds.namespaceInfo", "line_number": 37, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 37, "usage_type": "name"}, {"api_name": "maya.cmds.namespace", "line_number": 38, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 38, "usage_type": "name"}, {"api_name": "maya.cmds.namespace", "line_number": 40, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 40, "usage_type": "name"}, {"api_name": "maya.cmds.namespace", "line_number": 42, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 42, "usage_type": "name"}, {"api_name": "maya.cmds.ls", "line_number": 50, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 50, "usage_type": "name"}, {"api_name": "maya.cmds.objExists", "line_number": 52, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 52, "usage_type": "name"}, {"api_name": "maya.cmds.delete", "line_number": 55, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 55, "usage_type": "name"}, {"api_name": "maya.cmds.ls", "line_number": 59, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 59, "usage_type": "name"}, {"api_name": "maya.cmds.objExists", "line_number": 61, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 61, "usage_type": "name"}, {"api_name": "maya.cmds.listRelatives", "line_number": 63, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 63, "usage_type": "name"}, {"api_name": "maya.cmds.getAttr", "line_number": 66, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 66, "usage_type": "name"}, {"api_name": "maya.cmds.ls", "line_number": 79, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 79, "usage_type": "name"}, {"api_name": "maya.cmds.objExists", "line_number": 81, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 81, "usage_type": "name"}, {"api_name": "maya.cmds.listRelatives", "line_number": 82, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 82, "usage_type": "name"}, {"api_name": "maya.cmds.nodeType", "line_number": 90, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 90, "usage_type": "name"}, {"api_name": "maya.cmds.delete", "line_number": 99, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 99, "usage_type": "name"}, {"api_name": "maya.cmds.delete", "line_number": 
101, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 101, "usage_type": "name"}, {"api_name": "maya.cmds.group", "line_number": 104, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 104, "usage_type": "name"}, {"api_name": "maya.cmds.ls", "line_number": 108, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 108, "usage_type": "name"}, {"api_name": "maya.cmds.objExists", "line_number": 110, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 110, "usage_type": "name"}, {"api_name": "maya.cmds.listRelatives", "line_number": 112, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 112, "usage_type": "name"}, {"api_name": "maya.cmds.listRelatives", "line_number": 113, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 113, "usage_type": "name"}, {"api_name": "maya.cmds.group", "line_number": 122, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 122, "usage_type": "name"}, {"api_name": "maya.cmds.xform", "line_number": 124, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 124, "usage_type": "name"}, {"api_name": "maya.cmds.parent", "line_number": 126, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 126, "usage_type": "name"}, {"api_name": "maya.cmds.makeIdentity", "line_number": 127, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 127, "usage_type": "name"}, {"api_name": "maya.cmds.xform", "line_number": 128, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 128, "usage_type": "name"}, {"api_name": "maya.cmds.parent", "line_number": 131, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 131, "usage_type": "name"}, {"api_name": "maya.cmds.parent", "line_number": 133, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 133, "usage_type": "name"}, {"api_name": "maya.cmds.delete", "line_number": 135, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 135, "usage_type": "name"}, {"api_name": "maya.mel.eval", "line_number": 139, "usage_type": "call"}, {"api_name": "maya.mel", "line_number": 139, "usage_type": "name"}, {"api_name": "maya.cmds.objExists", "line_number": 142, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 142, "usage_type": "name"}, {"api_name": "maya.cmds.addAttr", "line_number": 143, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 143, "usage_type": "name"}, {"api_name": "maya.cmds.setAttr", "line_number": 144, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 144, "usage_type": "name"}, {"api_name": "maya.cmds.parent", "line_number": 148, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 148, "usage_type": "name"}]}
+{"seq_id": "13763524196", "text": "import pandas as pd\r\nimport numpy as np\r\nfrom pprint import pprint\r\nimport csv\r\nfrom dicttoxml import dicttoxml\r\nimport sys\r\nimport infoGain\r\nfile_name = sys.argv[1]\r\noutput_file = sys.argv[2]\r\n\r\n\r\nwith open(file_name) as csv_file:\r\n data_list = list(csv.reader(csv_file))\r\n\r\nm = len(data_list)\r\nn = len(data_list[1])\r\n\r\natt = []\r\n\r\nfor attr in range(0,n):\r\n att.append('att'+ str(attr))\r\n\r\n\r\ndataset = pd.read_csv(file_name,names=att)\r\n\r\n\r\n\r\ndef ID3(data, originaldata, features, target_attribute_name, parent_node_class=None):\r\n\r\n\r\n if len(np.unique(data[target_attribute_name])) <= 1:\r\n return np.unique(data[target_attribute_name])[0]\r\n\r\n\r\n elif len(data) == 0:\r\n return np.unique(originaldata[target_attribute_name])[\r\n np.argmax(np.unique(originaldata[target_attribute_name], return_counts=True)[1])]\r\n\r\n\r\n elif len(features) == 0:\r\n return parent_node_class\r\n\r\n\r\n\r\n else:\r\n\r\n parent_node_class = np.unique(data[target_attribute_name])[\r\n np.argmax(np.unique(data[target_attribute_name], return_counts=True)[1])]\r\n\r\n\r\n item_values = [infoGain.InfoGain(data, feature, att[-1]) for feature in features]\r\n entropy_values = []\r\n entropy_values.append([infoGain.entropyList(data, feature, att[-1]) for feature in features])\r\n best_feature_index = np.argmax(item_values)\r\n best_feature = features[best_feature_index]\r\n\r\n\r\n tree = {best_feature:{}}\r\n\r\n\r\n features = [i for i in features if i != best_feature]\r\n\r\n for value in np.unique(data[best_feature]):\r\n value = value\r\n\r\n sub_data = data.where(data[best_feature] == value).dropna()\r\n\r\n\r\n subtree = ID3(sub_data, dataset, features, att[-1], parent_node_class)\r\n\r\n\r\n tree[best_feature][value] = subtree\r\n\r\n\r\n return (tree)\r\n\r\n\r\ntree = ID3(dataset,dataset,dataset.columns[:-1],att[-1])\r\n\r\nxml = dicttoxml(tree)\r\n\r\nwith open(output_file, \"wb\") as f:\r\n f.write(xml)\r\n\r\n", "repo_name": "nandish21594/machine_learning_algorithms", "sub_path": "Decision tree/Decision_tree.py", "file_name": "Decision_tree.py", "file_ext": "py", "file_size_in_byte": 1996, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "73", "api": [{"api_name": "sys.argv", "line_number": 8, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 9, "usage_type": "attribute"}, {"api_name": "csv.reader", "line_number": 13, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 24, "usage_type": "call"}, {"api_name": "numpy.unique", "line_number": 31, "usage_type": "call"}, {"api_name": "numpy.unique", "line_number": 32, "usage_type": "call"}, {"api_name": "numpy.unique", "line_number": 36, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 37, "usage_type": "call"}, {"api_name": "numpy.unique", "line_number": 37, "usage_type": "call"}, {"api_name": "numpy.unique", "line_number": 47, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 48, "usage_type": "call"}, {"api_name": "numpy.unique", "line_number": 48, "usage_type": "call"}, {"api_name": "infoGain.InfoGain", "line_number": 51, "usage_type": "call"}, {"api_name": "infoGain.entropyList", "line_number": 53, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 54, "usage_type": "call"}, {"api_name": "numpy.unique", "line_number": 63, "usage_type": "call"}, {"api_name": "dicttoxml.dicttoxml", "line_number": 80, "usage_type": "call"}]}
+{"seq_id": "73731307117", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Mar 3 13:16:48 2017\n\n@author: deaxman\n\"\"\"\n\n#%%\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nfrom sklearn.linear_model import LinearRegression\nimport numpy as np\nimport pandas as pd\nfrom numpy.linalg import inv\nfrom numpy.linalg import det\n#%%\nS=np.array([1,1,1,2,2,3,3,3,2,4,4,4])-1\n#X=np.array([[0,0],[-1,0],[0,1],[3,3],[3,4],[7,7],[7,8],[8,7],[2,3],[12,10],[10,10],[11,10]])\nX=np.random.rand(50,2)*10\nplt.scatter(X[:,0],X[:,1])\nk=4\ncenters=X[np.random.choice(X.shape[0],size=(4),replace=False),:]\nsigmas=np.array([np.eye(2) for i in range(k)])\nsigmaInvs=np.array([inv(sigmas[i,:,:]) for i in range(k)])\nZ=np.zeros((200,200))\ndef createCovMat(X,w):\n print(np.dot(X.T*w.T,X)/(np.sum(w[0])))\n return np.dot(X.T*w.T,X)/(np.sum(w[0]))\n\ndef gaussian2d(X,mu,sigma):\n return np.exp(-0.5*np.dot(np.dot((X-mu),inv(sigma)),(X-mu).T))/(2*np.pi*det(sigma))\n \nfor i in np.arange(5):\n W=np.exp(-0.5*np.sum((X[:,:,np.newaxis]-centers[:,:,np.newaxis].T).T*np.sum((sigmaInvs[:,:,:,np.newaxis]\n *(X[:,:,np.newaxis]-centers[:,:,np.newaxis].T).T[:,:,:,np.newaxis].transpose((0,1,3,2))),axis=1),axis=1).T)/((2*np.pi*np.array([np.sqrt(det(sigmas[i,:,:])) for i in range(k)]))[:,np.newaxis].T)\n \n #/((2*np.pi*np.array([np.sqrt(det(sigmas[i,:,:])) for i in range(k)]))[:,np.newaxis].T)\n l=np.argmin(W,axis=1)\n fig=plt.figure()\n ax=plt.axes()\n ax.scatter(X[:,0],X[:,1])\n a, b = np.meshgrid(np.arange(-5,15,0.1), np.arange(-5,15,0.1))\n for blob in range(centers.shape[0]):\n for j in range(200):\n for i in range(200):\n Z[j,i]=gaussian2d(np.concatenate((a[:,:,np.newaxis],b[:,:,np.newaxis]),axis=2)[i,j,:],centers[blob,:],sigmas[blob,:])\n ax.contour(a,b,Z)\n ax.scatter(centers[:,0],centers[:,1],marker='x')\n centers=np.array([np.sum(X*(W[:,i])[:,np.newaxis],axis=0)/np.sum(W[:,i],axis=0) for i in range(k)])\n sigmas=np.array([createCovMat(X,(W[:,i])[:,np.newaxis]) for i in range(k)])\n sigmaInvs=np.array([inv(sigmas[i,:,:]) for i in range(k)])\n \n \n \n", "repo_name": "dustinaxman/MLPractice", "sub_path": "GMMpractice.py", "file_name": "GMMpractice.py", "file_ext": "py", "file_size_in_byte": 2107, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "73", "api": [{"api_name": "numpy.array", "line_number": 18, "usage_type": "call"}, {"api_name": "numpy.random.rand", "line_number": 20, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 20, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 21, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 21, "usage_type": "name"}, {"api_name": "numpy.random.choice", "line_number": 23, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 23, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 24, "usage_type": "call"}, {"api_name": "numpy.eye", "line_number": 24, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 25, "usage_type": "call"}, {"api_name": "numpy.linalg.inv", "line_number": 25, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 26, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 28, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 28, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 29, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 29, "usage_type": "call"}, {"api_name": "numpy.exp", 
"line_number": 32, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 32, "usage_type": "call"}, {"api_name": "numpy.linalg.inv", "line_number": 32, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 32, "usage_type": "attribute"}, {"api_name": "numpy.linalg.det", "line_number": 32, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 34, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 35, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 35, "usage_type": "call"}, {"api_name": "numpy.newaxis", "line_number": 35, "usage_type": "attribute"}, {"api_name": "numpy.newaxis", "line_number": 36, "usage_type": "attribute"}, {"api_name": "numpy.pi", "line_number": 36, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 36, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 36, "usage_type": "call"}, {"api_name": "numpy.linalg.det", "line_number": 36, "usage_type": "call"}, {"api_name": "numpy.argmin", "line_number": 39, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 40, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 40, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axes", "line_number": 41, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 41, "usage_type": "name"}, {"api_name": "numpy.meshgrid", "line_number": 43, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 43, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 47, "usage_type": "call"}, {"api_name": "numpy.newaxis", "line_number": 47, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 50, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 50, "usage_type": "call"}, {"api_name": "numpy.newaxis", "line_number": 50, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 51, "usage_type": "call"}, {"api_name": "numpy.newaxis", "line_number": 51, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 52, "usage_type": "call"}, {"api_name": "numpy.linalg.inv", "line_number": 52, "usage_type": "call"}]}
+{"seq_id": "71542313195", "text": "import utils as ut\n\ndef parse1(line):\n\treturn line\n\ndef day13p1():\n print('day 13 part 1')\n lines = ut.get_file('day13_input.txt', parse1)\n # lines = ut.get_file('day13_input_small.txt', parse1)\n\n start_time = int(lines[0])\n schedule = list(map(int, ''.join(lines[1].split('x,')).split(',')))\n departs_in = float('inf')\n bus_id = -1\n for time in schedule:\n new_departs_in = time - start_time % time\n if new_departs_in < departs_in:\n departs_in = new_departs_in\n bus_id = time\n\n print(departs_in, bus_id)\n return departs_in * bus_id\n\n# print(day13p1()) #12min\n\ndef parse2(line):\n\treturn line\n\ndef day13p2():\n print('day 13 part 2')\n lines = ut.get_file('day13_input.txt', parse2)\n # lines = ut.get_file('day13_input_small.txt', parse2)\n\n offset = 0\n schedule = []\n for val in lines[1].split(','):\n # if val == 'x':\n # offset += 1\n if val.isnumeric():\n bus_id = int(val)\n schedule.append((bus_id, offset))\n offset+=1\n print(schedule)\n\n max_sync = 1\n for bus_id, offset in schedule:\n max_sync *= bus_id\n\n def validate(t, schedule):\n for bus_id, offset in schedule:\n if not ((t+offset) % bus_id == 0):\n return False\n return True\n\n first_bus = 467 # int(lines[1].split(',')[0])\n t = 100000000000171 # max_sync\n counter = 0\n while True:\n # while t > 0:\n counter += 1\n if counter % 10000000 == 0:\n print(counter, t)\n if validate(t-29, schedule):\n return t\n # t -= first_bus\n t += first_bus\n\n return schedule\n\n# print(day13p2())\n\ndef day13p2v2():\n print('day 13 part 2v2')\n lines = ut.get_file('day13_input.txt', parse2)\n # lines = ut.get_file('day13_input_small.txt', parse2)\n\n offset = 0\n schedule = []\n for val in lines[1].split(','):\n if val.isnumeric():\n bus_id = int(val)\n schedule.append((bus_id, offset))\n offset += 1\n print(schedule)\n\n all_buses = [bus_id for bus_id, offset in schedule]\n\n # find a match with the the first n buses\n # use that match to update the search_offset (=0)\n # update the range operators\n range_top = 1\n range_step = 1\n # prev_bus_id = 1\n search_offset = 0\n for i, (bus_id, offset) in enumerate(schedule):\n range_top *= bus_id\n found = check_match(range_top, range_step, search_offset, schedule[:i+1])\n if found:\n search_offset = found\n print('found', search_offset)\n range_step *= bus_id\n\n return search_offset\n\ndef check_match(range_top, range_step, search_offset, schedule):\n print('every', range_step, 'offset', search_offset, 'sched', schedule)\n for i in range(0, range_top, range_step):\n if i == 0:\n continue\n t = i + search_offset\n match = [False] * len(schedule)\n\n bus_id, offset = schedule[-1]\n if (t + offset) % bus_id == 0:\n return t # short version\n # for j, (bus_id, offset) in enumerate(schedule):\n # if (t + offset) % bus_id == 0:\n # print('one', t)\n # match[j] = True\n # if all(match):\n # return i # return new offset\n return False\n\nprint(day13p2v2()) # 3hr 46min\n", "repo_name": "yufengg/adventofcode", "sub_path": "day13.py", "file_name": "day13.py", "file_ext": "py", "file_size_in_byte": 3345, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "73", "api": [{"api_name": "utils.get_file", "line_number": 8, "usage_type": "call"}, {"api_name": "utils.get_file", "line_number": 31, "usage_type": "call"}, {"api_name": "utils.get_file", "line_number": 74, "usage_type": "call"}]}
+{"seq_id": "74389128236", "text": "from tkinter import *\nfrom datetime import datetime\n\nfrom constants import DATE_TIME_FORMAT\n\nCELL_MARGIN = 10\nCELL_PADDING = 2\n\n\nclass ShowPercentage(Frame):\n def __init__(self, parent):\n Frame.__init__(self, parent)\n\n self.show_efficiency_hangers = StringVar()\n self.show_efficiency_change_colour = StringVar()\n\n self.efficiency_colour_title = Label(self,\n text=\"Rendimiento del último color\",\n anchor=\"center\",\n relief=\"groove\")\n self.efficiency_colour_title.grid(row=0,\n column=0,\n padx=(0, CELL_PADDING),\n pady=(0, CELL_PADDING),\n ipadx=CELL_MARGIN,\n ipady=CELL_MARGIN,\n sticky=W + E + N + S)\n self.efficiency_colour_result = Label(self,\n textvariable=self.show_efficiency_hangers,\n bg=\"white\",\n anchor=\"center\",\n relief=\"groove\")\n self.efficiency_colour_result.grid(row=1,\n column=0,\n padx=(0, CELL_PADDING),\n pady=(0, CELL_PADDING),\n ipadx=CELL_MARGIN,\n ipady=CELL_MARGIN,\n sticky=W + E + N + S)\n self.efficiency_change_colour_title = Label(self,\n text=\"Rendimiento del último cambio de color\",\n anchor=\"center\",\n relief=\"groove\")\n self.efficiency_change_colour_title.grid(row=3,\n column=0,\n padx=(0, CELL_PADDING),\n pady=(0, CELL_PADDING),\n ipadx=CELL_MARGIN,\n ipady=CELL_MARGIN,\n sticky=W + E + N + S)\n self.efficiency_change_colour_result = Label(self,\n textvariable=self.show_efficiency_change_colour,\n bg=\"white\",\n anchor=\"center\",\n relief=\"groove\")\n self.efficiency_change_colour_result.grid(row=4,\n column=0,\n padx=(0, CELL_PADDING),\n pady=(0, CELL_PADDING),\n ipadx=CELL_MARGIN,\n ipady=CELL_MARGIN,\n sticky=W + E + N + S)\n\n def update_change_colour_time_efficiency(self, change_times_map, history_as_records):\n last_colour = history_as_records[len(history_as_records) - 1].colour_code\n penultimate_colour = history_as_records[len(history_as_records) - 2].colour_code\n concatenate_two_colours = penultimate_colour + \"-\" + last_colour\n\n change_time_by_colour_combo = change_times_map.get(concatenate_two_colours, None)\n if change_time_by_colour_combo is not None:\n efficiency = self.__calculate_efficiency_change_colour(change_time_by_colour_combo, history_as_records)\n self.show_efficiency_change_colour.set(efficiency)\n else:\n self.show_efficiency_change_colour.set(\"No hay datos\\nanteriores con lo\\nque comparar.\")\n\n def update_colour_time_efficiency(self,\n start_datetime_as_string,\n end_datetime_as_string,\n amount_of_hangers_as_string):\n # Convierto los entrys en fechas para poder restarlas.\n start_colour = datetime.strptime(start_datetime_as_string, DATE_TIME_FORMAT)\n end_colour = datetime.strptime(end_datetime_as_string, DATE_TIME_FORMAT)\n\n # Convertimos los bastidores a un entero.\n hangers = int(amount_of_hangers_as_string)\n\n # Restamos las dos fechas y lo pasamos a segundos.\n time_diff = end_colour - start_colour\n time_colour = time_diff.days * 24 * 3600 + time_diff.seconds\n\n # Comparamos el número de bastidores con los que podrían pasar con un rendimiento del 100%.\n ideal_hanger_passing_time = 10\n max_hangers_in_time_colour = time_colour / ideal_hanger_passing_time\n\n # Eficiencia del paso de bastidores de este color.\n efficiency_hangers = hangers / max_hangers_in_time_colour\n\n # round(number,1) sirve para redondear un flotante al decimal que queramos\n percentage_efficiency_hangers = str(int(efficiency_hangers * 100)) + \" %\"\n\n self.show_efficiency_hangers.set(percentage_efficiency_hangers)\n\n def __calculate_efficiency_change_colour(self, 
change_time_by_colour_combo, history_as_records):\n average_time_of_colour_change = int(change_time_by_colour_combo)\n time1 = datetime.strptime(history_as_records[len(history_as_records) - 1].change_start_time, DATE_TIME_FORMAT)\n time2 = datetime.strptime(history_as_records[len(history_as_records) - 1].colour_start_time, DATE_TIME_FORMAT)\n\n last_time_change = time2 - time1\n\n last_time_change_in_seconds = last_time_change.days * 24 * 3600 + last_time_change.seconds\n\n efficiency_change_last_colour = \\\n str(int((average_time_of_colour_change * 100) / last_time_change_in_seconds)) + \" %\"\n return efficiency_change_last_colour\n", "repo_name": "hector231091/control_colores_lacado_II", "sub_path": "percentage.py", "file_name": "percentage.py", "file_ext": "py", "file_size_in_byte": 6211, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "73", "api": [{"api_name": "datetime.datetime.strptime", "line_number": 81, "usage_type": "call"}, {"api_name": "constants.DATE_TIME_FORMAT", "line_number": 81, "usage_type": "argument"}, {"api_name": "datetime.datetime", "line_number": 81, "usage_type": "name"}, {"api_name": "datetime.datetime.strptime", "line_number": 82, "usage_type": "call"}, {"api_name": "constants.DATE_TIME_FORMAT", "line_number": 82, "usage_type": "argument"}, {"api_name": "datetime.datetime", "line_number": 82, "usage_type": "name"}, {"api_name": "datetime.datetime.strptime", "line_number": 105, "usage_type": "call"}, {"api_name": "constants.DATE_TIME_FORMAT", "line_number": 105, "usage_type": "argument"}, {"api_name": "datetime.datetime", "line_number": 105, "usage_type": "name"}, {"api_name": "datetime.datetime.strptime", "line_number": 106, "usage_type": "call"}, {"api_name": "constants.DATE_TIME_FORMAT", "line_number": 106, "usage_type": "argument"}, {"api_name": "datetime.datetime", "line_number": 106, "usage_type": "name"}]}
+{"seq_id": "11876225420", "text": "import io\nimport os\n\nfrom shapely.geometry import Polygon, GeometryCollection, MultiPolygon\nfrom typing import List\n\nfrom wai.common.cli.options import TypedOption, FlagOption\nfrom wai.common.geometry import Polygon as WaiPolygon\nfrom wai.common.geometry import Point as WaiPoint\nfrom wai.common.adams.imaging.locateobjects import LocatedObjects, LocatedObject\nfrom wai.annotations.domain.image import Image\nfrom wai.annotations.domain.image import ImageInstance\nfrom wai.annotations.domain.classification import Classification\nfrom wai.annotations.domain.image.classification import ImageClassificationInstance\nfrom wai.annotations.domain.image.object_detection import ImageObjectDetectionInstance\nfrom wai.annotations.core.component import ProcessorComponent\nfrom wai.annotations.core.stream import ThenFunction, DoneFunction\nfrom wai.annotations.core.stream.util import RequiresNoFinalisation\n\n\nREGION_SORTING_NONE = \"none\"\nREGION_SORTING_XY = \"x-then-y\"\nREGION_SORTING_YX = \"y-then-x\"\nREGION_SORTING = [\n REGION_SORTING_NONE,\n REGION_SORTING_XY,\n REGION_SORTING_YX,\n]\n\n\nclass SubImages(\n RequiresNoFinalisation,\n ProcessorComponent[ImageInstance, ImageInstance]\n):\n \"\"\"\n Stream processor which turns RGB images into fake grayscale ones.\n \"\"\"\n\n regions: List[str] = TypedOption(\n \"-r\", \"--regions\",\n type=str,\n nargs=\"+\",\n help=\"the regions (X,Y,WIDTH,HEIGHT) to crop and forward with their annotations\"\n )\n\n region_sorting: str = TypedOption(\n \"-s\", \"--region-sorting\",\n type=str,\n default=REGION_SORTING_NONE,\n help=\"how to sort the supplied region definitions: %s\" % \"|\".join(REGION_SORTING)\n )\n\n include_partial: bool = FlagOption(\n \"-p\", \"--include-partial\",\n help=\"whether to include only annotations that fit fully into a region or also partial ones\"\n )\n\n skip_empty: bool = FlagOption(\n \"-e\", \"--suppress-empty\",\n help=\"suppresses sub-images that have no annotations (object detection)\"\n )\n\n verbose: bool = FlagOption(\n \"--verbose\",\n help=\"for outputting debugging information\"\n )\n\n def _initialize(self):\n \"\"\"\n Parses options.\n \"\"\"\n self._regions_xyxy = []\n self._region_lobjs = []\n for region in self.regions:\n coords = [int(x) for x in region.split(\",\")]\n if len(coords) == 4:\n x, y, w, h = coords\n self._region_lobjs.append(LocatedObject(x=x, y=y, width=w, height=h))\n\n if self.verbose:\n self.logger.info(\"unsorted regions: %s\" % str([str(x) for x in self._region_lobjs]))\n\n if self.region_sorting is not REGION_SORTING_NONE:\n if self.region_sorting == REGION_SORTING_XY:\n def sorting(obj: LocatedObject):\n return \"%06d %06d\" % (obj.x, obj.y)\n elif self.region_sorting == REGION_SORTING_YX:\n def sorting(obj: LocatedObject):\n return \"%06d %06d\" % (obj.y, obj.x)\n else:\n raise Exception(\"Unhandled region sorting: %s\" % self.region_sorting)\n self._region_lobjs.sort(key=sorting)\n if self.verbose:\n self.logger.info(\"sorted regions: %s\" % str([str(x) for x in self._region_lobjs]))\n\n for lobj in self._region_lobjs:\n self._regions_xyxy.append((lobj.x, lobj.y, lobj.x + lobj.width - 1, lobj.y + lobj.height - 1))\n if self.verbose:\n self.logger.info(\"sorted xyxy: %s\" % str(self._regions_xyxy))\n\n def _new_filename(self, filename, index):\n \"\"\"\n Generates a new filename based on the original and the index of the region.\n\n :param filename: the base filename\n :type filename: str\n :param index: the region index\n :type index: int\n 
:return: the generated filename\n :rtype: str\n \"\"\"\n parts = os.path.splitext(filename)\n pattern = \"-%0\" + str(len(str(len(self._region_lobjs)))) + \"d\"\n return parts[0] + pattern % index + parts[1]\n\n def _bbox_to_shapely(self, lobj: LocatedObject) -> Polygon:\n \"\"\"\n Converts the located object rectangle into a shapely Polygon.\n\n :param lobj: the bbox to convert\n :return: the Polygon\n \"\"\"\n coords = [\n (lobj.x, lobj.y),\n (lobj.x + lobj.width - 1, lobj.y),\n (lobj.x + lobj.width - 1, lobj.y + lobj.height - 1),\n (lobj.x, lobj.y + lobj.height - 1),\n (lobj.x, lobj.y),\n ]\n return Polygon(coords)\n\n def _polygon_to_shapely(self, lobj: LocatedObject) -> Polygon:\n \"\"\"\n Converts the located object polygon into a shapely Polygon.\n\n :param lobj: the polygon to convert\n :return: the Polygon\n \"\"\"\n if not lobj.has_polygon():\n return self._bbox_to_shapely(lobj)\n x_list = lobj.get_polygon_x()\n y_list = lobj.get_polygon_y()\n coords = []\n for x, y in zip(x_list, y_list):\n coords.append((x, y))\n coords.append((x_list[0], y_list[0]))\n return Polygon(coords)\n\n def _fit_annotation(self, index: int, region: LocatedObject, annotation: LocatedObject) -> LocatedObject:\n \"\"\"\n Fits the annotation into the specified region, adjusts size if necessary.\n\n :param index: the index of the region\n :param region: the region to fit the annotation in\n :param annotation: the annotation to fit\n :return: the adjust annotation\n \"\"\"\n sregion = self._bbox_to_shapely(region)\n sbbox = self._bbox_to_shapely(annotation)\n sintersect = sbbox.intersection(sregion)\n minx, miny, maxx, maxy = [int(x) for x in sintersect.bounds]\n result = LocatedObject(x=minx-region.x, y=miny-region.y, width=maxx-minx+1, height=maxy-miny+1, **annotation.metadata)\n result.metadata[\"region_index\"] = index\n result.metadata[\"region_xywh\"] = \"%d,%d,%d,%d\" % (region.x, region.y, region.width, region.height)\n\n if annotation.has_polygon():\n spolygon = self._polygon_to_shapely(annotation)\n else:\n spolygon = self._bbox_to_shapely(annotation)\n\n try:\n sintersect = spolygon.intersection(sregion)\n except:\n self.logger.warning(\"Failed to compute intersection!\")\n sintersect = None\n\n if isinstance(sintersect, GeometryCollection):\n for x in sintersect.geoms:\n if isinstance(x, Polygon):\n sintersect = x\n break\n elif isinstance(sintersect, MultiPolygon):\n for x in sintersect.geoms:\n if isinstance(x, Polygon):\n sintersect = x\n break\n\n if isinstance(sintersect, Polygon):\n x_list, y_list = sintersect.exterior.coords.xy\n points = []\n for i in range(len(x_list)):\n points.append(WaiPoint(x=x_list[i]-region.x, y=y_list[i]-region.y))\n result.set_polygon(WaiPolygon(*points))\n else:\n self.logger.warning(\"Unhandled geometry type returned from intersection, skipping: %s\" % str(type(sintersect)))\n\n return result\n\n def process_element(\n self,\n element: ImageInstance,\n then: ThenFunction[ImageInstance],\n done: DoneFunction\n ):\n if not hasattr(self, \"_regions_xyxy\"):\n self._initialize()\n\n img_in = element.data\n\n pil_image = img_in.pil_image\n for region_index, region_xyxy in enumerate(self._regions_xyxy):\n if self.verbose:\n self.logger.info(\"Applying region %d :%s\" % (region_index, str(region_xyxy)))\n # crop image\n sub_image = pil_image.crop(region_xyxy)\n pil_img_bytes = io.BytesIO()\n sub_image.save(pil_img_bytes, format=img_in.format.pil_format_string)\n img_out = Image(self._new_filename(img_in.filename, region_index), pil_img_bytes.getvalue(), img_in.format, 
img_in.size)\n # crop annotations and forward\n region_lobj = self._region_lobjs[region_index]\n if isinstance(element, ImageClassificationInstance):\n annotations = Classification(label=element.annotations.label)\n new_element = ImageClassificationInstance(data=img_out, annotations=annotations)\n then(new_element)\n elif isinstance(element, ImageObjectDetectionInstance):\n new_objects = []\n for ann_lobj in element.annotations:\n ratio = region_lobj.overlap_ratio(ann_lobj)\n if ((ratio > 0) and self.include_partial) or (ratio >= 1):\n new_objects.append(self._fit_annotation(region_index, region_lobj, ann_lobj))\n if not self.skip_empty or (len(new_objects) > 0):\n new_element = ImageObjectDetectionInstance(data=img_out, annotations=LocatedObjects(new_objects))\n then(new_element)\n else:\n self.logger.warning(\"Unhandled data (%s), skipping!\" % str(type(element)))\n then(element)\n return\n", "repo_name": "waikato-ufdl/wai-annotations-imgaug", "sub_path": "src/wai/annotations/imgaug/isp/sub_images/component/_SubImages.py", "file_name": "_SubImages.py", "file_ext": "py", "file_size_in_byte": 9281, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "73", "api": [{"api_name": "wai.annotations.core.stream.util.RequiresNoFinalisation", "line_number": 32, "usage_type": "name"}, {"api_name": "wai.annotations.core.component.ProcessorComponent", "line_number": 33, "usage_type": "name"}, {"api_name": "wai.annotations.domain.image.ImageInstance", "line_number": 33, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 39, "usage_type": "name"}, {"api_name": "wai.common.cli.options.TypedOption", "line_number": 39, "usage_type": "call"}, {"api_name": "wai.common.cli.options.TypedOption", "line_number": 46, "usage_type": "call"}, {"api_name": "wai.common.cli.options.FlagOption", "line_number": 53, "usage_type": "call"}, {"api_name": "wai.common.cli.options.FlagOption", "line_number": 58, "usage_type": "call"}, {"api_name": "wai.common.cli.options.FlagOption", "line_number": 63, "usage_type": "call"}, {"api_name": "wai.common.adams.imaging.locateobjects.LocatedObject", "line_number": 78, "usage_type": "call"}, {"api_name": "wai.common.adams.imaging.locateobjects.LocatedObject", "line_number": 85, "usage_type": "name"}, {"api_name": "wai.common.adams.imaging.locateobjects.LocatedObject", "line_number": 88, "usage_type": "name"}, {"api_name": "os.path.splitext", "line_number": 112, "usage_type": "call"}, {"api_name": "os.path", "line_number": 112, "usage_type": "attribute"}, {"api_name": "wai.common.adams.imaging.locateobjects.LocatedObject", "line_number": 116, "usage_type": "name"}, {"api_name": "shapely.geometry.Polygon", "line_number": 130, "usage_type": "call"}, {"api_name": "shapely.geometry.Polygon", "line_number": 116, "usage_type": "name"}, {"api_name": "wai.common.adams.imaging.locateobjects.LocatedObject", "line_number": 132, "usage_type": "name"}, {"api_name": "shapely.geometry.Polygon", "line_number": 147, "usage_type": "call"}, {"api_name": "shapely.geometry.Polygon", "line_number": 132, "usage_type": "name"}, {"api_name": "wai.common.adams.imaging.locateobjects.LocatedObject", "line_number": 149, "usage_type": "name"}, {"api_name": "wai.common.adams.imaging.locateobjects.LocatedObject", "line_number": 162, "usage_type": "call"}, {"api_name": "shapely.geometry.GeometryCollection", "line_number": 177, "usage_type": "argument"}, {"api_name": "shapely.geometry.Polygon", "line_number": 179, "usage_type": "argument"}, {"api_name": 
"shapely.geometry.MultiPolygon", "line_number": 182, "usage_type": "argument"}, {"api_name": "shapely.geometry.Polygon", "line_number": 184, "usage_type": "argument"}, {"api_name": "shapely.geometry.Polygon", "line_number": 188, "usage_type": "argument"}, {"api_name": "wai.common.geometry.Point", "line_number": 192, "usage_type": "call"}, {"api_name": "wai.common.geometry.Polygon", "line_number": 193, "usage_type": "call"}, {"api_name": "wai.annotations.domain.image.ImageInstance", "line_number": 201, "usage_type": "name"}, {"api_name": "wai.annotations.core.stream.ThenFunction", "line_number": 202, "usage_type": "name"}, {"api_name": "wai.annotations.domain.image.ImageInstance", "line_number": 202, "usage_type": "name"}, {"api_name": "wai.annotations.core.stream.DoneFunction", "line_number": 203, "usage_type": "name"}, {"api_name": "io.BytesIO", "line_number": 216, "usage_type": "call"}, {"api_name": "wai.annotations.domain.image.Image", "line_number": 218, "usage_type": "call"}, {"api_name": "wai.annotations.domain.image.classification.ImageClassificationInstance", "line_number": 221, "usage_type": "argument"}, {"api_name": "wai.annotations.domain.classification.Classification", "line_number": 222, "usage_type": "call"}, {"api_name": "wai.annotations.domain.image.classification.ImageClassificationInstance", "line_number": 223, "usage_type": "call"}, {"api_name": "wai.annotations.domain.image.object_detection.ImageObjectDetectionInstance", "line_number": 225, "usage_type": "argument"}, {"api_name": "wai.annotations.domain.image.object_detection.ImageObjectDetectionInstance", "line_number": 232, "usage_type": "call"}, {"api_name": "wai.common.adams.imaging.locateobjects.LocatedObjects", "line_number": 232, "usage_type": "call"}]}
+{"seq_id": "26773160360", "text": "#-*-coding:utf-8-*-\n\nimport requests\nimport bs4\nimport openpyxl\n\ndef parser_url(url):\n headers = {\"user-agent\": \"Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36\"}\n response = requests.get(url,headers=headers)\n soup = bs4.BeautifulSoup(response.text.replace('[','').replace(']',''),'html.parser')\n return soup\n\ndef parser_soup(soup):\n target = soup.find_all('p',style='TEXT-INDENT: 2em')\n city = []\n house_price = []\n money = []\n proportion = []\n result = []\n a = []\n\n for i in range(0,37*5,5):\n city.append(target[9+i].text)\n for i in range(0,37*5,5):\n house_price.append(target[10+i].text)\n for i in range(0,37*5,5):\n money.append(target[11+i].text)\n for i in range(0,37*5,5):\n proportion.append(target[12+i].text)\n\n for i in range(0,36):\n result.append(city[i])\n result.append(house_price[i])\n result.append(money[i])\n result.append(proportion[i])\n a.append(result)\n result = []\n return a\ndef write_file(content):\n wb = openpyxl.Workbook()\n wb.guess_types = True\n ws = wb.active\n ws.append(['city','house_price','acerage_salary','proportion'])\n for each in content:\n ws.append(each)\n wb.save('房价比.xlsx')\n print ('生成文件成功!')\n\ndef main():\n url = 'http://news.house.qq.com/a/20170702/003985.htm'\n soup = parser_url(url)\n content = parser_soup(soup)\n write_file(content)\n\nif __name__ == '__main__':\n main()\n", "repo_name": "tao4091/Python", "sub_path": "My_python/MaHaiTao/house_money.py", "file_name": "house_money.py", "file_ext": "py", "file_size_in_byte": 1552, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "73", "api": [{"api_name": "requests.get", "line_number": 9, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 10, "usage_type": "call"}, {"api_name": "openpyxl.Workbook", "line_number": 40, "usage_type": "call"}]}
+{"seq_id": "37620300296", "text": "import pandas as pd\r\nfrom sklearn.naive_bayes import BernoulliNB\r\nfrom sklearn.metrics import accuracy_score\r\nmodel = BernoulliNB()\r\n\r\n#Read data\r\nmnist_train = pd.read_csv('C:\\RPi Server\\!Electrical Engineering\\!Semester 1\\Artificial Intelligence\\mnist_train_th128.csv')\r\nmnist_test = pd.read_csv('C:\\RPi Server\\!Electrical Engineering\\!Semester 1\\Artificial Intelligence\\mnist_test_th128.csv')\r\n\r\ndata_training = mnist_train[0:3001]\r\ndata_testing = mnist_test[0:51]\r\n\r\n#print(data_training)\r\n#print(data_testing)\r\n\r\n#Removing label\r\ndata_training_new = data_training.drop(columns=['label'])\r\ndata_testing_new = data_testing.drop(columns=['label'])\r\n\r\n#print(data_training_new)\r\n#print(data_testing_new)\r\n\r\ndata_training_out = data_training['label']\r\ndata_testing_out = data_testing['label']\r\n#print(data_training_out)\r\n#print(data_testing_out)\r\n\r\nmodel.fit(data_training_new,data_training_out)\r\nexpected = data_testing_out\r\npredicted = model.predict(data_testing_new)\r\nprint(predicted)\r\nprint(\"Error ratio dari 50 percobaan adalah : \",(100-(accuracy_score(expected,predicted)*100)),\"%\")", "repo_name": "afafirmansyah/artificial-intelligence", "sub_path": "Naive Bayes.py", "file_name": "Naive Bayes.py", "file_ext": "py", "file_size_in_byte": 1089, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "73", "api": [{"api_name": "sklearn.naive_bayes.BernoulliNB", "line_number": 4, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 7, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 8, "usage_type": "call"}, {"api_name": "sklearn.metrics.accuracy_score", "line_number": 32, "usage_type": "call"}]}
+{"seq_id": "4177310918", "text": "from django.http.response import JsonResponse\nfrom django.shortcuts import render,redirect\nfrom lsc_app.forms.md_forms import md\nfrom django.http import HttpResponse\nfrom lsc_app.models import *\nfrom datetime import datetime\nimport json\n\n\ndef load_selected_category_users_ajax(request):\n \"\"\"\n :param request:\n :return: returns a a HttpResponse of users list for the selected user category\n \"\"\"\n user_category = request.GET['user_category']\n\n if user_category == 'MD':\n users = MD.objects.all()\n \n if user_category == 'Doctor':\n users = Doctor.objects.all()\n\n if user_category == 'Therapist':\n users = Therapist.objects.all()\n\n if user_category == 'Receptionist':\n users = Receptionist.objects.all()\n\n #print(users)\n cd = set(users.values_list('name', flat=True).distinct())\n result = '''{}'''.format(list(cd))\n return HttpResponse(result) \n\n\ndef suggest_username_ajax(request):\n \"\"\"\n :param request:\n :return: returns a a HttpResponse of valid new username for the selcted user profile\n \"\"\"\n user_name = request.GET['user_name']\n print(user_name)\n users = User.objects.filter(username=user_name)\n print(users)\n user_profile = UserProfile.objects.all()\n last_user = user_profile.latest('user_id_field')\n print (last_user)\n new_user_id_field = int(last_user.user_id_field) + 1\n print(new_user_id_field)\n if not User.objects.filter(username=user_name).exists():\n data = new_user_id_field\n return HttpResponse(data, content_type='text/plain')\n else:\n data = 409\n return HttpResponse(data,content_type='text/plain')\n\n\nfrom django.core.files.storage import FileSystemStorage\nfrom django.views.decorators.csrf import csrf_exempt,csrf_protect\n\n@csrf_exempt\ndef load_blog_images_ajax(request):\n upload = request.FILES['image']\n print(upload)\n fss = FileSystemStorage()\n # django-summernote/\" + datetime.datetime.now().strftime(\"%Y/%m\")\n file = fss.save(upload.name, upload)\n print(file)\n file_url = fss.url(file)\n print (file_url)\n return HttpResponse(file_url)", "repo_name": "saranyagovindasamy/Djano_Proj", "sub_path": "lakshmisiddhaclinic/lsc_project/lsc_app/views/ajax_views/ajax_views.py", "file_name": "ajax_views.py", "file_ext": "py", "file_size_in_byte": 2135, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "73", "api": [{"api_name": "django.http.HttpResponse", "line_number": 32, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 51, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 54, "usage_type": "call"}, {"api_name": "django.core.files.storage.FileSystemStorage", "line_number": 64, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 70, "usage_type": "call"}, {"api_name": "django.views.decorators.csrf.csrf_exempt", "line_number": 60, "usage_type": "name"}]}
+{"seq_id": "32040704901", "text": "from django.shortcuts import render\nfrom .models import *\nfrom django_pivot.pivot import pivot\nfrom django_pivot.histogram import histogram\nfrom datetime import date\nimport pandas as pd\nimport numpy as np\nfrom django.db.models import Sum, Count\n\ndef RISDB(request):\n # This RIS_Project_Objects Should Change With The Filters\n RIS_Project_Objects = RIS_Project.objects.all()\n\n # This RIS_Project_Object_Static View Should Not Change With The Filter\n RIS_Project_Objects_Static = RIS_Project.objects.all()\n #####################################################################################\n # For Dependent Filters --- #\n #####################################################################################\n # Partner Country is dependant on Sub Region and sub region is dependant on Partner Region\n Partner_Region_Choices = Partner_Region.objects.all()\n Sub_Region_Choices = Sub_Region.objects.all()\n Partner_Country_Choices = Partner_Country.objects.all()\n\n # Modalities\n Modalities_Choices = RIS_Project.objects.values('Modalities').distinct()\n\n # Sub Modalities\n SubModalities_Choices = RIS_Project.objects.values('Sub_Modalities').distinct()\n\n\n # Year\n Year_Choices = RIS_Project.objects.values('Year').distinct().order_by('Year')\n\n #####################################################################################\n # Filters Logic--- #\n #####################################################################################\n\n if 'PartnerRegion' in request.GET:\n PartnerRegion = request.GET.getlist('PartnerRegion')\n # print(PartnerRegion)\n if PartnerRegion:\n RIS_Project_Objects = RIS_Project_Objects.filter(Partner_Region_Code__in=PartnerRegion)\n\n\n if 'SubRegion' in request.GET:\n SubRegion = request.GET.getlist('SubRegion')\n # print(SubRegion)\n if SubRegion:\n RIS_Project_Objects = RIS_Project_Objects.filter(Code_of_Sub_Region__in=SubRegion)\n\n\n if 'PartnerCountry' in request.GET:\n PartnerCountry = request.GET.getlist('PartnerCountry')\n # print(PartnerCountry)\n if PartnerCountry:\n RIS_Project_Objects = RIS_Project_Objects.filter(Partner_Country_Code__in=PartnerCountry)\n\n\n if 'Modalities' in request.GET:\n Modalities = request.GET.getlist('Modalities')\n # print(Modalities)\n if Modalities:\n RIS_Project_Objects = RIS_Project_Objects.filter(Modalities__in=Modalities)\n\n\n if 'SubModalities' in request.GET:\n SubModalities = request.GET.getlist('SubModalities')\n # print(SubModalities)\n if SubModalities:\n RIS_Project_Objects = RIS_Project_Objects.filter(Sub_Modalities__in=SubModalities)\n\n\n\n if 'YearFrom' in request.GET:\n YearFrom = request.GET['YearFrom']\n if YearFrom:\n RIS_Project_Objects = RIS_Project_Objects.filter(Year__gte=YearFrom)\n\n\n if 'YearTo' in request.GET:\n YearTo = request.GET['YearTo']\n if YearTo > YearFrom:\n RIS_Project_Objects = RIS_Project_Objects.filter(Year__lte=YearTo)\n\n if YearTo < YearFrom:\n YearTo = int(YearFrom) + 1\n RIS_Project_Objects = RIS_Project_Objects.filter(Year__lte=YearTo)\n\n\n\n #####################################################################################\n # For Left Sidebar Graphs - #\n\n #####################################################################################\n # For Left Sidebar Cards - #\n #######################################################################################\n # 1 Total Country Benefited\n Total_Country_Benefited_Count = len(RIS_Project_Objects_Static.values('Partner_Country').distinct())-3\n\n # 2 Concessional Finance 
Total Disbursement\n Total_Disbursement_Of_SubModalities = RIS_Project_Objects_Static.values('Sub_Modalities').annotate(total=Sum('Disbursement_of_development_assistance_USD_million')).order_by('-total')\n\n # Total_Disbursement_Of_SubModalities.filter('')\n # 3 Total Disbursement_of_development_assistance_USD_million\n Country_Wise_Disbursement_Total = RIS_Project_Objects_Static.values('Partner_Country').order_by('Partner_Country').annotate(total=Sum('Disbursement_of_development_assistance_USD_million')).order_by('-total')\n Total_Disbursement_of_development_assistance = Country_Wise_Disbursement_Total.aggregate(Sum('total'))['total__sum']\n\n #4 Grant (Modality) Total Disbursement\n Total_Disbursement_Of_Modalities = RIS_Project_Objects_Static.values('Modalities').annotate(total=Sum('Disbursement_of_development_assistance_USD_million')).order_by('-total')\n\n #5 Total People Trained (usinng total number of slots utilized in capacity building)\n Total_Number_Of_Slots_across_Modalities = RIS_Project_Objects_Static.values('Modalities').annotate(total=Sum('No_of_Slots_Utilized')).order_by('-total')\n\n # Total Trade Concession\n # Get From Total_Disbursement_Of_Modalities\n\n\n # Time-Series Charts (Small left side card)\n # 5 Total Disbursement with Time (Cumulative) - Line Chart\n Total_Disbursement_with_Time_Static_Chart_DF = pd.DataFrame(RIS_Project_Objects_Static.values('Year').annotate(total=Sum('Disbursement_of_development_assistance_USD_million')).order_by('Year'))\n Total_Disbursement_with_Time_Static_Chart_DF.replace(to_replace=[None], value=0, inplace=True)\n Total_Disbursement_with_Time_Static_Chart_DF['Cumulative_Disbursement_Frequency'] = Total_Disbursement_with_Time_Static_Chart_DF['total'].cumsum()\n Total_Disbursement_with_Time_Static_Chart = Total_Disbursement_with_Time_Static_Chart_DF.to_dict('records')\n\n\n # 6 Sub Modality Wise No_of_Slots_Utilized -Donut Chart\n SubModality_Wise_Number_Of_Slots_Utilized_DF = pd.DataFrame(RIS_Project_Objects_Static.values('Sub_Modalities').annotate(Number_Of_Slots=Sum('No_of_Slots_Utilized')))\n SubModality_Wise_Number_Of_Slots_Utilized_DF.replace(to_replace=[None], value=0, inplace=True)\n SubModality_Wise_Number_Of_Slots_Utilized_Chart = SubModality_Wise_Number_Of_Slots_Utilized_DF.to_dict('records')\n\n\n ###########################################################################################\n # Section For Dynamic Changing Charts With Filters #\n # Here I'm using RIS_Project_Objects That Gets Changed With The Filters\n ###########################################################################################\n\n # 1 Cumulative Disbursement With Time With Partner Region Wise - Bar Chart\n Total_Disbursement_with_Time_Dynamic_Chart_DF = pd.DataFrame(RIS_Project_Objects.values('Year').annotate(total=Sum('Disbursement_of_development_assistance_USD_million')).order_by('Year'))\n Total_Disbursement_with_Time_Dynamic_Chart_DF.replace(to_replace=[None], value=0, inplace=True)\n Total_Disbursement_with_Time_Dynamic_Chart_DF['Cumulative_Disbursement_Frequency'] = Total_Disbursement_with_Time_Dynamic_Chart_DF['total'].cumsum()\n Total_Disbursement_with_Time_Dynamic_Chart = Total_Disbursement_with_Time_Dynamic_Chart_DF.to_dict('records')\n\n\n # 2 Total Disbursement With Modality - Polar Chart\n Total_Disbursement_With_Modality = RIS_Project_Objects.values('Modalities').annotate(total=Sum('Disbursement_of_development_assistance_USD_million'))\n print(Total_Disbursement_With_Modality,'Total_Disbursement_With_Modality')\n\n # 3 For Geography 
Mapping\n # Region_Wise_Total_Number_Of_Project_Total_Disbursement_And_Total_Commitment\n\n # Region_Wise_Number_Of_Projects_For_Mapping = RIS_Project_Objects_Static.values('Partner_Region').order_by('Partner_Region').annotate(NumberOfProjects=Count('id'))\n\n Region_Wise_Disbursement_of_development_assistance_USD_million_Commitment_of_development_assistance_USD_million_For_Mapping = RIS_Project_Objects.values('Partner_Country').order_by('Partner_Country').annotate(Disbursement=Sum('Disbursement_of_development_assistance_USD_million'), Commitment=Sum('Commitment_of_development_assistance_USD_million'))\n # print(Region_Wise_Disbursement_of_development_assistance_USD_million_Commitment_of_development_assistance_USD_million_For_Mapping)\n Partner_Country_and_Modalities = RIS_Project_Objects.values('Partner_Country','Modalities')\n\n context = {\n 'RIS_Project_Objects': RIS_Project_Objects,\n # --------------- Filtering Form ---------------------- #\n 'Partner_Region_Choices': Partner_Region_Choices,\n 'Sub_Region_Choices': Sub_Region_Choices,\n 'Partner_Country_Choices': Partner_Country_Choices,\n 'Modalities_Choices': Modalities_Choices,\n 'SubModalities_Choices': SubModalities_Choices,\n\n 'Year_Choices': Year_Choices,\n\n\n #---------------------Left Side Card Stats-----------------#\n 'Total_Country_Benefited_Count': Total_Country_Benefited_Count,\n 'Total_Disbursement_Of_SubModalities': Total_Disbursement_Of_SubModalities,\n 'Total_Disbursement_of_development_assistance': Total_Disbursement_of_development_assistance,\n 'Total_Disbursement_Of_Modalities': Total_Disbursement_Of_Modalities,\n 'Total_Disbursement_with_Time_Static_Chart': Total_Disbursement_with_Time_Static_Chart,\n 'SubModality_Wise_Number_Of_Slots_Utilized_Chart': SubModality_Wise_Number_Of_Slots_Utilized_Chart,\n 'Total_Number_Of_Slots_across_Modalities': Total_Number_Of_Slots_across_Modalities,\n\n #-----------Middle Section Dynamic Changing Charts and Graphs----------------#\n 'Total_Disbursement_with_Time_Dynamic_Chart': Total_Disbursement_with_Time_Dynamic_Chart,\n 'Total_Disbursement_With_Modality': Total_Disbursement_With_Modality,\n 'Partner_Country_and_Modalities' : Partner_Country_and_Modalities,\n\n # Mapping - via Leaflet Bottom Section\n 'Region_Wise_Disbursement_of_development_assistance_USD_million_Commitment_of_development_assistance_USD_million_For_Mapping': Region_Wise_Disbursement_of_development_assistance_USD_million_Commitment_of_development_assistance_USD_million_For_Mapping,\n\n 'values': request.GET\n }\n return render(request, 'RIS_DB/RIS_DB_Home.html', context)\n\n# AJAX For Dynamically Filtering Dropdown Menu\ndef Load_Dependent_Sub_Region_Filters(request):\n Partner_Region_id = request.GET.getlist('PartnerRegionId[]')\n subRegion = Sub_Region.objects.filter(Partner_Region_Name_id__in=Partner_Region_id).distinct()\n partnerCountry = Partner_Country.objects.filter(Partner_Region_Name_id__in=Partner_Region_id).distinct()\n context = {'Sub_Region': subRegion,\n 'partnerCountry': partnerCountry\n }\n return render(request, 'partials/drill_down_filters/DrillDown_Sub_Region_Filter.html', context)\n\n\ndef Load_Dependent_Partner_Country_Filters(request):\n Sub_Region_id = request.GET.getlist('SubRegionId[]')\n partnerCountry = Partner_Country.objects.filter(Sub_Region_Name_id__in=Sub_Region_id).distinct()\n return render(request, 'partials/drill_down_filters/DrilDown_Partner_Country_Filter.html', {'Partner_Country': partnerCountry})\n\n\n\n\n", "repo_name": "rajpaul8/Research_Information_System", 
"sub_path": "RISDatabase/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 11306, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "73", "api": [{"api_name": "django.db.models.Sum", "line_number": 101, "usage_type": "call"}, {"api_name": "django.db.models.Sum", "line_number": 105, "usage_type": "call"}, {"api_name": "django.db.models.Sum", "line_number": 106, "usage_type": "call"}, {"api_name": "django.db.models.Sum", "line_number": 109, "usage_type": "call"}, {"api_name": "django.db.models.Sum", "line_number": 112, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 120, "usage_type": "call"}, {"api_name": "django.db.models.Sum", "line_number": 120, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 127, "usage_type": "call"}, {"api_name": "django.db.models.Sum", "line_number": 127, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 138, "usage_type": "call"}, {"api_name": "django.db.models.Sum", "line_number": 138, "usage_type": "call"}, {"api_name": "django.db.models.Sum", "line_number": 145, "usage_type": "call"}, {"api_name": "django.db.models.Sum", "line_number": 153, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 188, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 198, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 204, "usage_type": "call"}]}
+{"seq_id": "72539962477", "text": "import logging\n\nfrom client import Client\nfrom torrent import ActiveTorrent\n\ndef main():\n logging.basicConfig(filename='bt.log',\n filemode='w',\n level=logging.DEBUG,\n # format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n format='%(asctime)s - %(levelname)s - %(message)s')\n\n log = logging.getLogger('test_driver')\n log.info('Starting up...')\n\n client = Client()\n client.add_torrent('tom.torrent')\n client.reactor.start()\n\nif __name__ == '__main__':\n main()\n", "repo_name": "angusb/pyrate", "sub_path": "test_driver.py", "file_name": "test_driver.py", "file_ext": "py", "file_size_in_byte": 582, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "73", "api": [{"api_name": "logging.basicConfig", "line_number": 7, "usage_type": "call"}, {"api_name": "logging.DEBUG", "line_number": 9, "usage_type": "attribute"}, {"api_name": "logging.getLogger", "line_number": 13, "usage_type": "call"}, {"api_name": "client.Client", "line_number": 16, "usage_type": "call"}, {"api_name": "client.add_torrent", "line_number": 17, "usage_type": "call"}, {"api_name": "client.reactor.start", "line_number": 18, "usage_type": "call"}, {"api_name": "client.reactor", "line_number": 18, "usage_type": "attribute"}]}
+{"seq_id": "17805728294", "text": "# 排行榜\nfrom time import sleep\nfrom selenium.webdriver.common.by import By\nfrom Work.Common.base_page import basePage\n\n\nclass rankingPage(basePage):\n rankingMethod1 = (By.XPATH, '/html/body/div[2]/div/div[2]/div/div[2]/ul/li[1]/a')\n rankingMethod2 = (By.XPATH, '/html/body/div[2]/div/div[2]/div/div[2]/ul/li[2]/a')\n rankingMethod3 = (By.XPATH, '/html/body/div[2]/div/div[2]/div/div[2]/ul/li[3]/a')\n rankingMethod4 = (By.XPATH, '/html/body/div[2]/div/div[2]/div/div[2]/ul/li[4]/a')\n\n def getBookList(self):\n bookList = []\n for i in range(30):\n text = self.getText((By.XPATH, f'/html/body/div[2]/div/div[1]/div/div/div[2]/table/tbody/tr[{i + 1}]'))\n bookList.append(text)\n return bookList\n\n def rankingMethodSwitch(self):\n list1 = self.getBookList()\n self.click(self.rankingMethod2)\n sleep(2)\n list2 = self.getBookList()\n assert list1 != list2, '排行榜按钮切换异常'\n\n list1 = self.getBookList()\n self.click(self.rankingMethod3)\n sleep(2)\n list2 = self.getBookList()\n assert list1 != list2, '排行榜按钮切换异常'\n\n list1 = self.getBookList()\n self.click(self.rankingMethod4)\n sleep(2)\n list2 = self.getBookList()\n assert list1 != list2, '排行榜按钮切换异常'\n\n list1 = self.getBookList()\n self.click(self.rankingMethod1)\n sleep(2)\n list2 = self.getBookList()\n assert list1 != list2, '排行榜按钮切换异常'\n", "repo_name": "winterfellll/graduationDesign", "sub_path": "Work/PageObject/ranking_page.py", "file_name": "ranking_page.py", "file_ext": "py", "file_size_in_byte": 1540, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "83", "api": [{"api_name": "Work.Common.base_page.basePage", "line_number": 7, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.XPATH", "line_number": 8, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 8, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.XPATH", "line_number": 9, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 9, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.XPATH", "line_number": 10, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 10, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.XPATH", "line_number": 11, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 11, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.XPATH", "line_number": 16, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 16, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 23, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 29, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 35, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 41, "usage_type": "call"}]}
+{"seq_id": "75027366315", "text": "# count vowels\nfrom collections import defaultdict\nwords = 'supercalifragilisticexpialidocious'\n\nwords_dict = defaultdict(int)\nfor e in words:\n if e in ['a', 'e', 'i', 'o', 'u']:\n words_dict[e] += 1\n\nprint(words_dict)\n", "repo_name": "ihongChen/Advanced-Computer-Programming-in-Python", "sub_path": "src/chapter2_data_structure/count_vowels.py", "file_name": "count_vowels.py", "file_ext": "py", "file_size_in_byte": 228, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "73", "api": [{"api_name": "collections.defaultdict", "line_number": 5, "usage_type": "call"}]}
+{"seq_id": "7139315654", "text": "import sys\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom matplotlib import animation\n\n\ndef rho_red_light(nx, rho_max, rho_in):\n rho = rho_max * np.ones(nx)\n rho[:(nx - 1) * 3.0 / 4.0] = rho_in\n return rho\n\n\ndef computeF(u_max, rho_max, rho):\n return u_max * rho * (1 - rho / rho_max)\n\n\ndef godunov(rho, nt, dt, dx, rho_max, V_max):\n rho_n = np.zeros((nt, len(rho)))\n rho_n[:, :] = rho.copy()\n\n rho_plus = np.zeros_like(rho)\n rho_minus = np.zeros_like(rho)\n flux = np.zeros_like(rho)\n\n for t in xrange(1, nt):\n rho_plus[:-1] = rho[1:]\n rho_minus = rho.copy()\n flux = 0.5 * (computeF(V_max, rho_max, rho_minus) + computeF(V_max, rho_max, rho_plus) + (dx / dt) * (rho_minus - rho_plus))\n rho_n[t, 1:-1] = rho[1:-1] + (dt / dx) * (flux[:-2] - flux[1:-1])\n rho_n[t, 0] = rho[0]\n rho_n[t, -1] = rho[-1]\n rho = rho_n[t].copy()\n\n return rho_n\n\n\ndef muscl(rho, nt, dt, dx, rho_max, V_max):\n rho_n = np.zeros((nt, len(rho)))\n rho_n[:, :] = rho.copy()\n\n rho_plus = np.zeros_like(rho)\n rho_minus = np.zeros_like(rho)\n flux = np.zeros_like(rho)\n rho_star = np.zeros_like(rho)\n\n for t in xrange(1, nt):\n sigma = minmod(rho, dx)\n rho_left = rho + sigma * dx / 2.0\n rho_right = rho - sigma * dx / 2.0\n \n flux_left = computeF(V_max, rho_max, rho_left) \n flux_right = computeF(V_max, rho_max, rho_right)\n flux[:-1] = 0.5 * (flux_right[1:] + flux_left[:-1] - (dx / dt) * (rho_right[1:] - rho_left[:-1]))\n rho_star[1:-1] = rho[1:-1] + (dt / dx) * (flux[:-2] - flux[1:-1])\n\n\n rho_star[0] = rho[0]\n rho_star[-1] = rho[-1]\n\n sigma = minmod(rho_star, dx)\n rho_left = rho_star + sigma * dx / 2.0\n rho_right = rho_star - sigma * dx / 2.0\n\n flux_left = computeF(V_max, rho_max, rho_left)\n flux_right = computeF(V_max, rho_max, rho_right)\n flux[:-1] = 0.5 * (flux_right[1:] + flux_left[:-1] - (dx / dt) * (rho_right[1:] - rho_left[:-1]))\n rho_n[t, 1:-1] = 0.5 * (rho[1:-1] + rho_star[1:-1] + (dt / dx) * (flux[:-2] - flux[1:-1]))\n\n rho_n[t, 0] = rho[0]\n rho_n[t, -1] = rho[-1]\n rho = rho_n[t].copy()\n\n return rho_n\n\n\ndef minmod(e, dx):\n sigma = np.zeros_like(e)\n de_minus = np.ones_like(e)\n de_plus = np.ones_like(e)\n\n de_minus[1:] = (e[1:] - e[:-1]) / dx\n de_plus[:-1] = (e[1:] - e[:-1]) / dx\n \n for i in xrange(1, len(e) - 1):\n if de_minus[i] * de_plus[i] < 0.0:\n sigma[i] = 0.0\n elif np.abs(de_minus[i]) < np.abs(de_plus[i]):\n sigma[i] = de_minus[i]\n else:\n sigma[i] = de_plus[i]\n\n return sigma\n\n\ndef plot(x, rho, filename):\n plt.clf()\n plt.plot(x, rho, color = '#003366', ls = '-', lw = 3)\n plt.ylabel('Traffic density')\n plt.xlabel('Distance')\n plt.ylim(-0.5, 11.0)\n plt.savefig('./src/module3/images/' + filename, format = 'png')\n plt.close()\n\n\ndef main(argv):\n sigma = 1.0\n nx = 101\n nt = 30\n dx = 4.0 / (nx - 2)\n x = np.linspace(0, 4, nx - 1)\n\n rho_in = 5.0\n rho_max = 10.0\n V_max = 1.0\n\n dt = sigma * dx / V_max\n\n rho = rho_red_light(nx - 1, rho_max, rho_in)\n plot(x, rho, 'traffic_04.png')\n\n def animate(data):\n x = np.linspace(0, 4, nx - 1)\n y = data\n line.set_data(x, y)\n return line\n\n\n rho_n = godunov(rho, nt, dt, dx, rho_max, V_max)\n\n fig = plt.figure(facecolor = 'w')\n ax = plt.axes(xlim = (0, 4), ylim = (4.5, 11), xlabel = ('Distance'), ylabel = ('Traffic density'))\n line, = ax.plot([],[], color = '#003366', lw = 2)\n anim = animation.FuncAnimation(fig, animate, frames = rho_n, interval = 50)\n plt.show()\n\n\n rho_n = muscl(rho, nt, dt, dx, rho_max, V_max)\n\n fig = 
plt.figure(facecolor = 'w')\n ax = plt.axes(xlim = (0, 4), ylim = (4.5, 11), xlabel = ('Distance'), ylabel = ('Traffic density'))\n line, = ax.plot([],[], color = '#003366', lw = 2)\n anim = animation.FuncAnimation(fig, animate, frames = rho_n, interval = 50)\n plt.show()\n\n\n\nif __name__ == \"__main__\":\n main(sys.argv[1:])\n", "repo_name": "cowboysmall-moocs/numerical-mooc", "sub_path": "src/module3/traffic_04.py", "file_name": "traffic_04.py", "file_ext": "py", "file_size_in_byte": 4345, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "73", "api": [{"api_name": "numpy.ones", "line_number": 10, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 20, "usage_type": "call"}, {"api_name": "numpy.zeros_like", "line_number": 23, "usage_type": "call"}, {"api_name": "numpy.zeros_like", "line_number": 24, "usage_type": "call"}, {"api_name": "numpy.zeros_like", "line_number": 25, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 40, "usage_type": "call"}, {"api_name": "numpy.zeros_like", "line_number": 43, "usage_type": "call"}, {"api_name": "numpy.zeros_like", "line_number": 44, "usage_type": "call"}, {"api_name": "numpy.zeros_like", "line_number": 45, "usage_type": "call"}, {"api_name": "numpy.zeros_like", "line_number": 46, "usage_type": "call"}, {"api_name": "numpy.zeros_like", "line_number": 79, "usage_type": "call"}, {"api_name": "numpy.ones_like", "line_number": 80, "usage_type": "call"}, {"api_name": "numpy.ones_like", "line_number": 81, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 89, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.clf", "line_number": 98, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 98, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 99, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 99, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 100, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 100, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 101, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 101, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 102, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 102, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 103, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 103, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 104, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 104, "usage_type": "name"}, {"api_name": "numpy.linspace", "line_number": 112, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 124, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 132, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 132, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axes", "line_number": 133, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 133, "usage_type": "name"}, {"api_name": "matplotlib.animation.FuncAnimation", "line_number": 135, "usage_type": "call"}, {"api_name": "matplotlib.animation", "line_number": 135, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 136, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 136, "usage_type": 
"name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 141, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 141, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axes", "line_number": 142, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 142, "usage_type": "name"}, {"api_name": "matplotlib.animation.FuncAnimation", "line_number": 144, "usage_type": "call"}, {"api_name": "matplotlib.animation", "line_number": 144, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 145, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 145, "usage_type": "name"}, {"api_name": "sys.argv", "line_number": 150, "usage_type": "attribute"}]}
+{"seq_id": "23635974680", "text": "from more_itertools import quantify\nimport requests\nimport time\nfrom supabase import create_client, Client\n\ndef main():\n print(\"Initializing database...\")\n\n supabase: Client = create_client(\n \"https://jnbnzuyiuuaocbltwewu.supabase.co\", \n \"eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJzdXBhYmFzZSIsInJlZiI6ImpuYm56dXlpdXVhb2NibHR3ZXd1Iiwicm9sZSI6Im\" +\n \"Fub24iLCJpYXQiOjE2NjY3MjExMjEsImV4cCI6MTk4MjI5NzEyMX0.vnmH8LhJevM1ju-l9d0MnRXL6BmGNjOTw5XS0vO6NHY\"\n )\n\n num_pages = 10\n page_size = 10000\n to_dollars = 0.9965\n profit_margin = 1.05\n\n load_categories = False\n\n if load_categories:\n categories_respose = requests.get(\"https://api.bigbuy.eu/rest/catalog/categories.json?isoCode=en\",\n headers = {\"Authorization\": \"Bearer NjU5YzM3MzllNjM5YzFiYzNkMTkxZmQ0NTMyNGI4MzU0NzViZDAyOTI3NWZlZDliYzdkNmRjYWM5OTRkNjc1Nw\"})\n\n categories_respose.raise_for_status()\n categories = categories_respose.json()\n\n print(\"Loading Categories...\")\n for cat in categories:\n id = cat[\"id\"]\n name = cat[\"name\"]\n parentCategory = cat[\"parentCategory\"]\n images = cat[\"urlImages\"]\n\n data = supabase.table(\"Category\").insert({\n \"id\": id,\n \"name\": name,\n \"parentCategory\": parentCategory,\n \"imageUrl\": images[1]\n }).execute()\n print(data)\n else: print(\"Skipping Categories...\")\n\n print(\"Loading Products...\")\n for i in range(0, num_pages):\n print(\"Loading Page... \" + str(i))\n\n products_response = requests.get(\"https://api.bigbuy.eu/rest/catalog/productsstockavailable.json?isoCode=en&pageSize=\" + str(page_size) + \"&page=\" + str(i), \n headers = {\"Authorization\": \"Bearer NjU5YzM3MzllNjM5YzFiYzNkMTkxZmQ0NTMyNGI4MzU0NzViZDAyOTI3NWZlZDliYzdkNmRjYWM5OTRkNjc1Nw\"})\n products_response.raise_for_status()\n products = products_response.json()\n\n for product in products:\n try:\n id = product[\"id\"]\n\n print(\"Loading Product Info... \" + str(id))\n product_info_response = requests.get(\"https://api.bigbuy.eu/rest/catalog/productinformation/\" + str(id) + \".json?isoCode=en\", \n headers = {\"Authorization\": \"Bearer NjU5YzM3MzllNjM5YzFiYzNkMTkxZmQ0NTMyNGI4MzU0NzViZDAyOTI3NWZlZDliYzdkNmRjYWM5OTRkNjc1Nw\"})\n product_info_response.raise_for_status()\n product_info = product_info_response.json()\n\n print(\"Loading Product Images... \" + str(id))\n product_images_response = requests.get(\"https://api.bigbuy.eu/rest/catalog/productimages/\" + str(id) + \".json?isoCode=en\", \n headers = {\"Authorization\": \"Bearer NjU5YzM3MzllNjM5YzFiYzNkMTkxZmQ0NTMyNGI4MzU0NzViZDAyOTI3NWZlZDliYzdkNmRjYWM5OTRkNjc1Nw\"})\n product_images_response.raise_for_status()\n product_images = product_images_response.json()\n\n print(\"Loading product category info... 
\" + str(id))\n product_category_response = requests.get(\"https://api.bigbuy.eu/rest/catalog/product/\" + str(id) + \".json\", \n headers = {\"Authorization\": \"Bearer NjU5YzM3MzllNjM5YzFiYzNkMTkxZmQ0NTMyNGI4MzU0NzViZDAyOTI3NWZlZDliYzdkNmRjYWM5OTRkNjc1Nw\"})\n product_category_response.raise_for_status()\n product_category = product_category_response.json()\n \n sku = product_category[\"sku\"]\n category = product_category[\"category\"]\n wholesale_price = round((product_category[\"wholesalePrice\"] * to_dollars) * profit_margin, 2)\n retail_price = round((product_category[\"retailPrice\"] * to_dollars) * profit_margin, 2)\n in_shop_price = round((product_category[\"inShopsPrice\"] * to_dollars) * profit_margin, 2)\n\n name = product_info[0][\"name\"]\n description = product_info[0][\"description\"]\n images = product_images[\"images\"]\n\n quantity = product[\"stocks\"][0][\"quantity\"]\n\n data = supabase.table(\"Product\").insert({\n \"id\": id,\n \"sku\": sku,\n \"category\": category,\n \"wholesalePrice\": wholesale_price,\n \"retailPrice\": retail_price,\n \"inShopPrice\": in_shop_price,\n \"name\": name, \n \"description\": description,\n \"images\": images,\n \"quantity\": quantity\n }).execute()\n\n print(data)\n time.sleep(5.0)\n \n except:\n print(\"Cant load item... \" + str(id))\n continue\n\nif __name__ == \"__main__\":\n run_database = True\n\n if run_database:\n main()\n", "repo_name": "seiyadragon/rosscostore", "sub_path": "database.py", "file_name": "database.py", "file_ext": "py", "file_size_in_byte": 4894, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "73", "api": [{"api_name": "supabase.Client", "line_number": 9, "usage_type": "name"}, {"api_name": "supabase.create_client", "line_number": 9, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 23, "usage_type": "call"}, {"api_name": "supabase.table", "line_number": 36, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 49, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 59, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 65, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 71, "usage_type": "call"}, {"api_name": "supabase.table", "line_number": 88, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 102, "usage_type": "call"}]}
+{"seq_id": "30092222162", "text": "from coffea import hist, processor\nfrom copy import deepcopy\nfrom concurrent.futures import ThreadPoolExecutor, as_completed\n\nfrom tqdm import tqdm\nimport pickle\nimport lz4.frame\nimport numpy\nimport pandas\nimport awkward\nfrom functools import partial\n\nfrom ..executor import _futures_handler\nfrom coffea.nanoevents import NanoEventsFactory, schemas\nfrom coffea.nanoevents.mapping import SimplePreloadedColumnSource\n\nimport pyspark\nimport pyspark.sql.functions as fn\nfrom pyspark.sql.types import BinaryType, StringType, StructType, StructField\n\nfrom jinja2 import Environment, PackageLoader, select_autoescape\nfrom coffea.util import awkward\n\nlz4_clevel = 1\n\n\n# this is a UDF that takes care of summing histograms across\n# various spark results where the outputs are histogram blobs\ndef agg_histos_raw(series, processor_instance, lz4_clevel):\n goodlines = series[series.str.len() > 0]\n if goodlines.size == 1: # short-circuit trivial aggregations\n return goodlines[0]\n outhist = processor_instance.accumulator.identity()\n for line in goodlines:\n outhist.add(pickle.loads(lz4.frame.decompress(line)))\n return lz4.frame.compress(pickle.dumps(outhist), compression_level=lz4_clevel)\n\n\n@fn.pandas_udf(BinaryType(), fn.PandasUDFType.GROUPED_AGG)\ndef agg_histos(series):\n global processor_instance, lz4_clevel\n return agg_histos_raw(series, processor_instance, lz4_clevel)\n\n\ndef reduce_histos_raw(df, processor_instance, lz4_clevel):\n histos = df['histos']\n mask = (histos.str.len() > 0)\n outhist = processor_instance.accumulator.identity()\n for line in histos[mask]:\n outhist.add(pickle.loads(lz4.frame.decompress(line)))\n return pandas.DataFrame(data={'histos': numpy.array([lz4.frame.compress(pickle.dumps(outhist), compression_level=lz4_clevel)], dtype='O')})\n\n\n@fn.pandas_udf(StructType([StructField('histos', BinaryType(), True)]), fn.PandasUDFType.GROUPED_MAP)\ndef reduce_histos(df):\n global processor_instance, lz4_clevel\n return reduce_histos_raw(df, processor_instance, lz4_clevel)\n\n\nclass SparkExecutor(object):\n _template_name = 'spark.py.tmpl'\n\n def __init__(self):\n self._cacheddfs = None\n self._rawresults = None\n self._counts = None\n self._env = Environment(loader=PackageLoader('coffea.processor',\n 'templates'),\n autoescape=select_autoescape(['py'])\n )\n\n @property\n def counts(self):\n return self._counts\n\n def __call__(self, spark, dfslist, theprocessor, output, thread_workers,\n use_df_cache, schema, status=True, unit='datasets', desc='Processing'):\n # processor needs to be a global\n global processor_instance, coffea_udf, nano_schema\n processor_instance = theprocessor\n if schema is None:\n schema = schemas.BaseSchema\n if not issubclass(schema, schemas.BaseSchema):\n raise ValueError(\"Expected schema to derive from BaseSchema (%s)\" % (str(schema.__name__)))\n nano_schema = schema\n # get columns from processor\n columns = processor_instance.columns\n cols_w_ds = ['dataset'] + columns\n # make our udf\n tmpl = self._env.get_template(self._template_name)\n render = tmpl.render(cols=columns)\n exec(render)\n\n # cache the input datasets if it's not already done\n if self._counts is None:\n self._counts = {}\n # go through each dataset and thin down to the columns we want\n for ds, (df, counts) in dfslist.items():\n self._counts[ds] = counts\n\n def spex_accumulator(total, result):\n ds, df = result\n total[ds] = df\n\n if self._cacheddfs is None:\n self._cacheddfs = {}\n cachedesc = 'caching' if 
use_df_cache else 'pruning'\n with ThreadPoolExecutor(max_workers=thread_workers) as executor:\n futures = set()\n for ds, (df, counts) in dfslist.items():\n futures.add(executor.submit(self._pruneandcache_data, ds, df, cols_w_ds, use_df_cache))\n _futures_handler(futures, self._cacheddfs, status, unit, cachedesc, spex_accumulator, None)\n\n with ThreadPoolExecutor(max_workers=thread_workers) as executor:\n futures = set()\n for ds, df in self._cacheddfs.items():\n co_udf = coffea_udf\n futures.add(executor.submit(self._launch_analysis, ds, df, co_udf, cols_w_ds))\n # wait for the spark jobs to come in\n self._rawresults = {}\n _futures_handler(futures, self._rawresults, status, unit, desc, spex_accumulator, None)\n\n for ds, bitstream in self._rawresults.items():\n if bitstream is None:\n raise Exception('No pandas dataframe returned from spark in dataset: %s, something went wrong!' % ds)\n if bitstream.empty:\n raise Exception('The histogram list returned from spark is empty in dataset: %s, something went wrong!' % ds)\n bits = bitstream[bitstream.columns[0]][0]\n output.add(pickle.loads(lz4.frame.decompress(bits)))\n\n def _pruneandcache_data(self, ds, df, columns, cacheit):\n if cacheit:\n return ds, df.select(*columns).cache()\n return ds, df.select(*columns)\n\n def _launch_analysis(self, ds, df, udf, columns):\n histo_map_parts = (df.rdd.getNumPartitions() // 20) + 1\n return ds, df.select(udf(*columns).alias('histos')) \\\n .withColumn('hpid', fn.spark_partition_id() % histo_map_parts) \\\n .repartition(histo_map_parts, 'hpid') \\\n .groupBy('hpid').apply(reduce_histos) \\\n .groupBy().agg(agg_histos('histos')) \\\n .toPandas()\n\n\nspark_executor = SparkExecutor()\n", "repo_name": "LBJ-Wade/coffea", "sub_path": "coffea/processor/spark/spark_executor.py", "file_name": "spark_executor.py", "file_ext": "py", "file_size_in_byte": 5929, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "73", "api": [{"api_name": "pickle.loads", "line_number": 35, "usage_type": "call"}, {"api_name": "lz4.frame.frame.decompress", "line_number": 35, "usage_type": "call"}, {"api_name": "lz4.frame.frame", "line_number": 35, "usage_type": "attribute"}, {"api_name": "lz4.frame", "line_number": 35, "usage_type": "name"}, {"api_name": "lz4.frame.frame.compress", "line_number": 36, "usage_type": "call"}, {"api_name": "lz4.frame.frame", "line_number": 36, "usage_type": "attribute"}, {"api_name": "lz4.frame", "line_number": 36, "usage_type": "name"}, {"api_name": "pickle.dumps", "line_number": 36, "usage_type": "call"}, {"api_name": "pyspark.sql.functions.pandas_udf", "line_number": 39, "usage_type": "call"}, {"api_name": "pyspark.sql.functions", "line_number": 39, "usage_type": "name"}, {"api_name": "pyspark.sql.types.BinaryType", "line_number": 39, "usage_type": "call"}, {"api_name": "pyspark.sql.functions.PandasUDFType", "line_number": 39, "usage_type": "attribute"}, {"api_name": "pickle.loads", "line_number": 50, "usage_type": "call"}, {"api_name": "lz4.frame.frame.decompress", "line_number": 50, "usage_type": "call"}, {"api_name": "lz4.frame.frame", "line_number": 50, "usage_type": "attribute"}, {"api_name": "lz4.frame", "line_number": 50, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 51, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 51, "usage_type": "call"}, {"api_name": "lz4.frame.frame.compress", "line_number": 51, "usage_type": "call"}, {"api_name": "lz4.frame.frame", "line_number": 51, "usage_type": 
"attribute"}, {"api_name": "lz4.frame", "line_number": 51, "usage_type": "name"}, {"api_name": "pickle.dumps", "line_number": 51, "usage_type": "call"}, {"api_name": "pyspark.sql.functions.pandas_udf", "line_number": 54, "usage_type": "call"}, {"api_name": "pyspark.sql.functions", "line_number": 54, "usage_type": "name"}, {"api_name": "pyspark.sql.types.StructType", "line_number": 54, "usage_type": "call"}, {"api_name": "pyspark.sql.types.StructField", "line_number": 54, "usage_type": "call"}, {"api_name": "pyspark.sql.types.BinaryType", "line_number": 54, "usage_type": "call"}, {"api_name": "pyspark.sql.functions.PandasUDFType", "line_number": 54, "usage_type": "attribute"}, {"api_name": "jinja2.Environment", "line_number": 67, "usage_type": "call"}, {"api_name": "jinja2.PackageLoader", "line_number": 67, "usage_type": "call"}, {"api_name": "jinja2.select_autoescape", "line_number": 69, "usage_type": "call"}, {"api_name": "coffea.nanoevents.schemas.BaseSchema", "line_number": 82, "usage_type": "attribute"}, {"api_name": "coffea.nanoevents.schemas", "line_number": 82, "usage_type": "name"}, {"api_name": "coffea.nanoevents.schemas.BaseSchema", "line_number": 83, "usage_type": "attribute"}, {"api_name": "coffea.nanoevents.schemas", "line_number": 83, "usage_type": "name"}, {"api_name": "concurrent.futures.ThreadPoolExecutor", "line_number": 108, "usage_type": "call"}, {"api_name": "executor.submit", "line_number": 111, "usage_type": "call"}, {"api_name": "executor._futures_handler", "line_number": 112, "usage_type": "call"}, {"api_name": "concurrent.futures.ThreadPoolExecutor", "line_number": 114, "usage_type": "call"}, {"api_name": "executor.submit", "line_number": 118, "usage_type": "call"}, {"api_name": "executor._futures_handler", "line_number": 121, "usage_type": "call"}, {"api_name": "pickle.loads", "line_number": 129, "usage_type": "call"}, {"api_name": "lz4.frame.frame.decompress", "line_number": 129, "usage_type": "call"}, {"api_name": "lz4.frame.frame", "line_number": 129, "usage_type": "attribute"}, {"api_name": "lz4.frame", "line_number": 129, "usage_type": "name"}, {"api_name": "pyspark.sql.functions.spark_partition_id", "line_number": 139, "usage_type": "call"}, {"api_name": "pyspark.sql.functions", "line_number": 139, "usage_type": "name"}]}
+{"seq_id": "22166554183", "text": "import logging\n\nimport analytics\nfrom django.core import exceptions\n\nfrom server.utils.logging.constants import SCHOOL_MEMBER\n\nlogger = logging.getLogger(__name__)\n\nEVENT_SESSION_LOGIN = \"session_login\"\nEVENT_APP_LOGIN = \"app_login\"\nEVENT_INITIAL_PASSWORD_CREATED = \"initial_password_created\"\n\n\ndef identify_track(user, event_name, properties=None):\n traits = {\n \"name\": user.name,\n \"email\": user.email,\n \"user_type\": user.user_type,\n }\n\n if user.user_type in [user.Types.CONSUMER, user.Types.CONSUMER]:\n try:\n traits[\"school\"] = user.school_member.school.name\n except exceptions.ObjectDoesNotExist:\n logger.exception(\n f\"{SCHOOL_MEMBER} user pk: {user.pk} email: {user.email} has no school_member or school\"\n )\n\n analytics.identify(\n user.slug,\n traits,\n )\n\n return analytics.track(user.slug, event_name, properties)\n", "repo_name": "connectiveproject/connective", "sub_path": "server/server/utils/analytics_utils/__init__.py", "file_name": "__init__.py", "file_ext": "py", "file_size_in_byte": 933, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 4, "dataset": "github-code", "pt": "73", "api": [{"api_name": "logging.getLogger", "line_number": 8, "usage_type": "call"}, {"api_name": "django.core.exceptions.ObjectDoesNotExist", "line_number": 25, "usage_type": "attribute"}, {"api_name": "django.core.exceptions", "line_number": 25, "usage_type": "name"}, {"api_name": "server.utils.logging.constants.SCHOOL_MEMBER", "line_number": 27, "usage_type": "name"}, {"api_name": "analytics.identify", "line_number": 30, "usage_type": "call"}, {"api_name": "analytics.track", "line_number": 35, "usage_type": "call"}]}
+{"seq_id": "8916604187", "text": "from django.db import models\n\n__all__ = (\n 'Idol',\n 'Group',\n 'Membership',\n)\n\n\n# class IdolManager(models.Manager):\n# pass\n\nclass Idol(models.Model):\n name = models.CharField(max_length=30)\n\n # 오브젝츠 = IdolManager()\n # objects = models.Manager()\n\n def __str__(self):\n return self.name\n\n\nclass Group(models.Model):\n name = models.CharField(max_length=50)\n debut_date = models.DateField()\n members = models.ManyToManyField(\n Idol,\n through='Membership',\n through_fields=('group', 'idol'),\n )\n\n def __str__(self):\n return self.name\n\n\nclass Membership(models.Model):\n idol = models.ForeignKey(\n Idol,\n on_delete=models.CASCADE,\n related_name='membership_set'\n )\n group = models.ForeignKey(Group, on_delete=models.CASCADE)\n # # Idol,\n # # null=True,\n # # on_delete=models.CASCADE,\n # # related_name='recommend_membership_set',\n # )\n recommenders = models.ManyToManyField(\n Idol,\n blank=True,\n related_name='recommend_membership_set',\n )\n joined_date = models.DateField()\n is_active = models.BooleanField()\n\n def __str__(self):\n return f'{self.group.name}' \\\n f'{self.idol.name}' \\\n f'({self.is_active.name})'\n", "repo_name": "ehfgk78/Django-Documentation", "sub_path": "django_document/model/models/many2many/intermediate.py", "file_name": "intermediate.py", "file_ext": "py", "file_size_in_byte": 1319, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "73", "api": [{"api_name": "django.db.models.Model", "line_number": 13, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 13, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 14, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 14, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 23, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 23, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 24, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 24, "usage_type": "name"}, {"api_name": "django.db.models.DateField", "line_number": 25, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 25, "usage_type": "name"}, {"api_name": "django.db.models.ManyToManyField", "line_number": 26, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 26, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 36, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 36, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 37, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 37, "usage_type": "name"}, {"api_name": "django.db.models.CASCADE", "line_number": 39, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 39, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 42, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 42, "usage_type": "name"}, {"api_name": "django.db.models.CASCADE", "line_number": 42, "usage_type": "attribute"}, {"api_name": "django.db.models.ManyToManyField", "line_number": 48, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 48, "usage_type": "name"}, {"api_name": "django.db.models.DateField", "line_number": 53, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 53, "usage_type": "name"}, {"api_name": 
"django.db.models.BooleanField", "line_number": 54, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 54, "usage_type": "name"}]}
+{"seq_id": "13858739436", "text": "\"\"\"\nThis file is used to update GT with .csv file created by Yujie\nFormat:\nir_name, frame_name, target_position, screen_index, pupil_coord, purkj_coord\nWe only use target_position and screen_index.\n\"\"\"\nimport numpy as np\nimport argparse\nimport os\nimport datetime\nimport pandas as pd\nimport shutil\nimport cv2\nimport random\n\ndef extract_red_channel(image):\n assert image.shape[2] == 3 # check if this is a color image\n new_image = image[:,:,2]\n return new_image\n\n\ndef preprocess_image(cams, root_path):\n \"\"\" Preprocess images. Extract red channel, turn image upside down vertically. \"\"\"\n for i in cams:\n dir_path = os.path.join(root_path, 'video%d'%i)\n dirs = [x for x in os.listdir(dir_path) if x.startswith('rgb_spot')]\n for directory in dirs:\n full_path = os.path.join(dir_path, directory)\n gray_dir = full_path.replace('rgb_', '')\n if not os.path.exists(gray_dir):\n os.makedirs(gray_dir)\n files = os.listdir(full_path)\n for f in files:\n image = cv2.imread(os.path.join(full_path, f))\n red_image = extract_red_channel(image)\n cv2.imwrite(os.path.join(gray_dir, f), red_image)\n\n\ndef compute_GT(infos):\n \"\"\" Preprocess csv files. Compute ground truth data (x,y,z), size: (num_images, 3). \"\"\"\n # TODO: numbers below are not super accurate, need to think about the influence\n # display settings\n \"\"\"\n \n ----------------- \n - - \n ----- - - \n ----- - - - - \n eye - - ----- - - \n ----- ----------------- \n \n \n 0 1 2 \n 33cm 50.5cm 119cm (depth is inaccurate)\n W x H (w x h in pixel):\n 0: 20 x 15 cm (2048 x 1536)\n 1: 20 x 15 cm (2048 x 1536)\n 2: 70.8 x 39.8 cm (2560 x 1440)\n 3: 20 x 15 cm (2048 x 1536), move 0 5cm further\n 3: 20 x 15 cm (2048 x 1536), move 1 5cm further\n \"\"\"\n height_px = [1536, 1536, 1440, 1920, 1536]\n width_px = [2048, 2048, 2560, 1080, 2048]\n width_cm = [20, 20, 70.8, 20, 20]\n height_cm = [x*1.0*y/z for x,y,z in zip(width_cm, height_px, width_px)]\n cm_to_pix_scale = [x*1.0/y for x,y in zip(width_px, width_cm)]\n\n # spot settings of reflected display\n spot_depth = [33, 54.5, 119, 38, 59.5] # cm\n offset_x = [-0.35, -0.35, -0.35, -0.35, -0.35] # cm, offset of eye center and reflected display center in the x direction\n offset_y = [1.075, 1.4, 0, 1.075, 1.4] # cm, offset of eye center and reflected display center in the y direction\n\n # bench type settings\n # Note: Origin is center of right eye\n \"\"\"\n xxxxxxxxxxxxxxxxxxxxx O is the origin\n xx xx xx xx\n xx eye xx xx 0 xx\n xxxxxxxxx xxxxxxxxx\n \"\"\"\n new_target_position = np.zeros((len(infos), 3))\n for i in range(len(infos)):\n screen_id = infos.iloc[i][3]\n target_x = float((infos.iloc[i][2]).split(',')[0][1:])\n target_y = float((infos.iloc[i][2]).split(',')[1][:-1])\n\n x = target_x / cm_to_pix_scale[screen_id]\n y = target_y / cm_to_pix_scale[screen_id]\n ############ upside down cameras ############\n #if screen_id < 3: # should reflect 0, 1, 2: \n y = -y\n #############################################\n x = x + offset_x[screen_id]\n y = y + offset_y[screen_id]\n z = spot_depth[screen_id]\n\n new_target_position[i] = np.array([x,y,z])\n return new_target_position\n\n\ndef write_csv(filename, target_position, infos, dirname):\n \"\"\" Write final ground truth data to .csv file. 
\"\"\"\n \"\"\" Format: (unit is cm)\n name spot frame x y z pupil_x pupil_y pupil_radius purkj1_x purkj1_y purkj3_x purkj3_y\n conny_4_3 1 image_00501.png 5.46 0.0 50.0 23 26 23 12 14 166 277\n \"\"\"\n num_images = len(infos)\n pupils = np.zeros((num_images, 3))\n purkj1 = np.zeros((num_images, 2))\n purkj3 = np.zeros((num_images, 2))\n for i in range(num_images):\n pupils[i, 0] = float(infos.iloc[i,4].split(',')[0][1:])\n pupils[i, 1] = float(infos.iloc[i,4].split(',')[1])\n pupils[i, 2] = float(infos.iloc[i,4].split(',')[2][:-1])\n purkj1[i, 0] = float(infos.iloc[i,5].split(',')[0][2:])\n purkj1[i, 1] = float(infos.iloc[i,5].split(',')[1][:-1])\n purkj3[i, 0] = float(infos.iloc[i,5].split(',')[2][2:])\n try:\n purkj3[i, 1] = float(infos.iloc[i,5].split(',')[3][:-1])\n except:\n purkj3[i, 1] = float(infos.iloc[i,5].split(',')[3][:-2])\n df = pd.DataFrame({\"name\":[dirname]*num_images, \n \"spot\":infos.iloc[:,3],\n \"frame\":infos.iloc[:,1] + '.png',\n \"x\":target_position[:,0],\n \"y\":target_position[:,1],\n \"z\":target_position[:,2],\n \"pupil_x\":pupils[:,0],\n \"pupil_y\":pupils[:,1],\n \"pupil_radius\":pupils[:,2],\n \"purkj1_x\":purkj1[:,0],\n \"purkj1_y\":purkj1[:,1],\n \"purkj3_x\":purkj3[:,0],\n \"purkj3_y\":purkj3[:,1]}) \n df.to_csv(filename, index=False)\n return df\n\n \ndef main(args):\n dirname = os.path.join(args.root, args.dirname)\n csv_file = dirname + '.csv'\n\n infos = pd.read_csv(csv_file)\n print(\"===> Computing ground truth\")\n gt = compute_GT(infos) # (num_images, 3)\n filename = os.path.join('/data/connylu/eye_data/worldnew.csv')\n print(\"===> Writing final .csv file\")\n df = write_csv(filename, gt, infos, args.dirname)\n \n\nif __name__==\"__main__\":\n parser = argparse.ArgumentParser(description='Generate ground truth.')\n parser.add_argument('--root', type=str, default='/data/connylu/eye_data',\n help='root path')\n parser.add_argument('-d', '--dirname', type=str, required=True,\n help='directory name, world or eye, e.g. 
world')\n# parser.add_argument('-ipd', type=float, required=True,\n# help='IPD of user')\n# parser.add_argument('--num_cam', type=int, default = 8,\n# help='number of cameras')\n args = parser.parse_args()\n main(args)\n", "repo_name": "criminalking/COMP790", "sub_path": "eyetracker/code/get_data/extract_images_purkinje.py", "file_name": "extract_images_purkinje.py", "file_ext": "py", "file_size_in_byte": 6648, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "73", "api": [{"api_name": "os.path.join", "line_number": 25, "usage_type": "call"}, {"api_name": "os.path", "line_number": 25, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 26, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 28, "usage_type": "call"}, {"api_name": "os.path", "line_number": 28, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 30, "usage_type": "call"}, {"api_name": "os.path", "line_number": 30, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 31, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 32, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 34, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 34, "usage_type": "call"}, {"api_name": "os.path", "line_number": 34, "usage_type": "attribute"}, {"api_name": "cv2.imwrite", "line_number": 36, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 36, "usage_type": "call"}, {"api_name": "os.path", "line_number": 36, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 81, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 97, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 108, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 109, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 110, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 122, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 140, "usage_type": "call"}, {"api_name": "os.path", "line_number": 140, "usage_type": "attribute"}, {"api_name": "pandas.read_csv", "line_number": 143, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 146, "usage_type": "call"}, {"api_name": "os.path", "line_number": 146, "usage_type": "attribute"}, {"api_name": "argparse.ArgumentParser", "line_number": 152, "usage_type": "call"}]}
+{"seq_id": "33619968863", "text": "from __future__ import annotations\n\nimport errno\nimport os\nimport shutil\n\nfrom collections import defaultdict\nfrom concurrent.futures import as_completed\nfrom dataclasses import dataclass\nfrom functools import lru_cache\nfrom itertools import chain\nfrom logging import getLogger\nfrom pathlib import Path\nfrom typing import Dict\nfrom typing import Iterator\nfrom typing import Mapping\nfrom typing import NewType\nfrom typing import TypedDict\n\nimport yaml\n\nfrom deb_pkg_tools.deps import AbstractRelationship\nfrom deb_pkg_tools.deps import parse_depends\nfrom deb_pkg_tools.package import ArchiveEntry\nfrom deb_pkg_tools.package import PackageFile\nfrom deb_pkg_tools.package import collect_related_packages\nfrom deb_pkg_tools.package import inspect_package_contents\nfrom deb_pkg_tools.package import parse_filename\nfrom deb_pkg_tools.utils import find_installed_version\n\nfrom rosenv.environment.distro import RosDistribution\nfrom rosenv.environment.distro import parse_distro\nfrom rosenv.environment.locate import locate\nfrom rosenv.environment.run_command import CommandAbortedError\nfrom rosenv.environment.run_command import CommandFailedError\nfrom rosenv.environment.shell import RosEnvShell\nfrom rosenv.ros_package.package import PackageName\nfrom rosenv.rosdep.rosdep import ResolvedPackageName\nfrom rosenv.rosdep.rosdep import Rosdep\nfrom rosenv.util.cancelable_executor import CancelableExecutor\nfrom rosenv.util.cpu_count import get_cpu_count\nfrom rosenv.util.paths import remove_slash_prefix\n\n\n_logger = getLogger(__name__)\n\nDEFAULT_ROSENV_NAME = \"rosenv\"\n\nDebName = NewType(\"DebName\", str)\n\n\n@dataclass()\nclass Installable:\n name: PackageName\n deb_name: DebName\n location: Path\n\n\nclass UnmetDependencyError(Exception):\n def __init__(self, package: str, missing_dependencies: list[AbstractRelationship]) -> None:\n super().__init__(\n f\"{package} dependencies not found: {', '.join([str(dependency) for dependency in missing_dependencies])}\",\n )\n\n\n@lru_cache\ndef _get_installed_files(rosenv_path: Path, deb_path: Path) -> list[Path]:\n contents = inspect_package_contents(str(deb_path))\n return [rosenv_path / remove_slash_prefix(file) for file in contents]\n\n\nclass PackageIsNotInstalledError(Exception):\n def __init__(self, package: str) -> None:\n super().__init__(f\"Package {package} is not installed\")\n\n\nclass RemoveDependencyError(Exception):\n def __init__(self, package: str, dependent_packages: list[PackageName]) -> None:\n super().__init__(\n f\"Removing of Package {package} prohibited. 
{package} is a dependency for: {dependent_packages}\",\n )\n\n\nclass FileAlreadyInstalledError(Exception):\n def __init__(self, file: Path, installed_by_packages: list[PackageName]) -> None:\n self.file = file\n self.installed_by_packages = installed_by_packages\n super().__init__(f\"{file} already installed by {installed_by_packages}\")\n\n\nInstalledPackages = Dict[PackageName, Path]\n\n\nclass SettingsFile(TypedDict):\n installed_packages: dict[PackageName, str]\n ros_distro: str\n\n\nclass RosEnvSettings:\n def __init__(\n self,\n settings_file: Path,\n installed_packages: InstalledPackages,\n ros_distro: RosDistribution,\n ) -> None:\n self._settings_file = settings_file\n self.installed_packages = installed_packages\n self.ros_distro: RosDistribution = ros_distro\n\n @classmethod\n def read(cls, rosenv_path: Path) -> RosEnvSettings:\n settings_file = cls.get_settings_path(rosenv_path)\n settings: SettingsFile = yaml.safe_load(settings_file.read_text())\n installed_packages = {key: Path(value) for key, value in settings[\"installed_packages\"].items()}\n ros_distro = parse_distro(settings[\"ros_distro\"])\n return cls(\n settings_file=settings_file,\n installed_packages=installed_packages,\n ros_distro=ros_distro,\n )\n\n @staticmethod\n def initialize(rosenv_path: Path, ros_distro: RosDistribution) -> None:\n RosEnvSettings(\n settings_file=RosEnvSettings.get_settings_path(rosenv_path),\n installed_packages={},\n ros_distro=ros_distro,\n ).save()\n\n @staticmethod\n def get_settings_path(rosenv_path: Path) -> Path:\n return rosenv_path / \"rosenv/settings.yaml\"\n\n def save(self) -> None:\n if not self._settings_file.exists():\n self._settings_file.parent.mkdir(parents=True, exist_ok=True)\n\n self._settings_file.write_text(yaml.safe_dump(self._as_dict()))\n\n def add_installed(self, name: PackageName, location: Path) -> None:\n self.installed_packages[name] = location.absolute()\n self.save()\n\n def _as_dict(self) -> SettingsFile:\n return {\n \"installed_packages\": {key: str(value) for key, value in self.installed_packages.items()},\n \"ros_distro\": self.ros_distro,\n }\n\n def remove_installed(self, name: PackageName) -> None:\n self.installed_packages[name].unlink()\n del self.installed_packages[name]\n self.save()\n\n\nclass RosEnv:\n def __init__(self) -> None:\n self.path = locate(DEFAULT_ROSENV_NAME)\n # TODO(Moritz): /opt/ros/noetic is only correct if venv was created with default ros-path\n # https://dmz-gitlab.honda-ri.de/SSE/rosenv/-/issues/28\n self._settings = RosEnvSettings.read(self.path)\n self.shell = RosEnvShell(self.path / f\"opt/ros/{self._settings.ros_distro}/setup.sh\")\n self._rosdep: Rosdep | None = None\n\n @property\n def rosdep(self) -> Rosdep:\n if self._rosdep is None:\n self._rosdep = Rosdep(self.path, self.shell)\n return self._rosdep\n\n @property\n def ros_distro(self) -> RosDistribution:\n return self._settings.ros_distro\n\n @property\n def _packages_path(self) -> Path:\n return self.path / \"rosenv/packages/\"\n\n @property\n def _install_path(self) -> Path:\n return self.path\n\n def _copy(self, installable: Installable) -> Path:\n self._packages_path.mkdir(parents=True, exist_ok=True)\n\n saved_package = self._packages_path / installable.deb_name\n shutil.copy(installable.location, saved_package)\n return saved_package.resolve()\n\n def is_installed(self, package_name: PackageName) -> bool:\n return package_name in self._settings.installed_packages\n\n def get_package_deb_path(self, package_name: PackageName) -> Path:\n return 
self._settings.installed_packages[package_name]\n\n def get_installed_packages(self) -> list[PackageName]:\n return list(self._settings.installed_packages.keys())\n\n @staticmethod\n def _is_dependency(resolved_package_name: ResolvedPackageName, dependent: Path) -> bool:\n return any(package.name == resolved_package_name for package in collect_related_packages(dependent))\n\n def _get_dependent_packages(self, dependency_package: PackageName) -> list[PackageName]:\n resolved_package_name = self.rosdep.resolve(package_name=dependency_package)\n\n with CancelableExecutor(max_workers=get_cpu_count(minimum=4)) as pool:\n futures = {\n pool.submit(self._is_dependency, resolved_package_name, path_to_debian): package_name\n for package_name, path_to_debian in self._settings.installed_packages.items()\n }\n\n return [futures[future] for future in as_completed(futures) if future.result()]\n\n @staticmethod\n def _re_init_symlinked_dir(folder: Path) -> None:\n source_path = Path(os.readlink(folder.absolute()))\n folder.unlink()\n folder.mkdir()\n _logger.debug(\"reinit symlinks one level below for folder: %s\", folder)\n for source in source_path.iterdir():\n target = folder / source.name\n _logger.debug(\"creating symlink: %s -> %s\", target, source)\n target.symlink_to(source, target_is_directory=source.is_dir())\n\n def _to_rosenv_root_absolute(self, file: Path | str) -> Path:\n return self._install_path / remove_slash_prefix(file)\n\n def _build_package_file_lookup(self) -> dict[Path, list[PackageName]]:\n lookup = defaultdict(list)\n inspection_items = (\n (file_path, package)\n for package, deb_path in self._settings.installed_packages.items()\n for file_path in _get_installed_files(self._install_path, deb_path)\n )\n for path, package in inspection_items:\n lookup[path].append(package)\n\n return lookup\n\n def _find_installed_by_packages(self, file: Path) -> list[PackageName]:\n files_installed_by_package = self._build_package_file_lookup()\n return files_installed_by_package[file]\n\n def _handle_package_contents(self, installable: Installable, *, overwrite: bool) -> None:\n contents: dict[str, ArchiveEntry] = inspect_package_contents(str(installable.location))\n for package_path in contents:\n installed_file_path = self._to_rosenv_root_absolute(package_path)\n _logger.debug(\n \"Trying to install: installed_file_path=%s package_path=%s symlink:%s\",\n installed_file_path,\n package_path,\n contents[package_path].target,\n )\n if installed_file_path.is_file():\n installed_by = self._find_installed_by_packages(installed_file_path)\n if overwrite:\n _logger.warning(\n \"File exists in rosenv, will be overwritten: %s installed by %s\",\n str(installed_file_path.relative_to(self.path)),\n installed_by,\n )\n else:\n raise FileAlreadyInstalledError(installed_file_path, installed_by)\n elif installed_file_path.is_symlink() and contents[package_path].target == \"\":\n _logger.debug(\"Symlinked dir exists in rosenv: %s\", str(installed_file_path))\n self._re_init_symlinked_dir(installed_file_path)\n\n def _get_dependencies_of(\n self,\n installable: Installable,\n ) -> Iterator[AbstractRelationship]:\n return chain.from_iterable(\n parse_depends(self.shell.run(f\"dpkg-deb -f {installable.location} {field}\"))\n for field in (\"depends\", \"pre-depends\")\n )\n\n @staticmethod\n def _get_system_installed_version(dependency_name: str) -> str | None:\n return find_installed_version(dependency_name) # type: ignore[no-any-return]\n\n def _get_rosenv_installed_debs(self) -> Mapping[str, PackageFile]:\n if 
not self._packages_path.exists():\n return {}\n file_names = (parse_filename(filename) for filename in self._packages_path.iterdir())\n return {package_file.name: package_file for package_file in file_names}\n\n @staticmethod\n def _is_met_via_rosenv(\n name: str,\n dependency: AbstractRelationship,\n rosenv_installed_debs: Mapping[str, PackageFile],\n ) -> bool:\n return name in rosenv_installed_debs and dependency.matches(\n name,\n rosenv_installed_debs[name].version,\n )\n\n @staticmethod\n def _is_met_via_system(\n name: str,\n dependency: AbstractRelationship,\n ) -> bool:\n system_version = RosEnv._get_system_installed_version(name)\n # System dependencies MUST have a version. So if no version can be found, package is not installed\n return system_version is not None and dependency.matches(name, system_version)\n\n @staticmethod\n def _is_dependency_met(\n dependency: AbstractRelationship,\n rosenv_installed_debs: Mapping[str, PackageFile],\n ) -> bool:\n _logger.debug(\"Checking dependency: %s\", dependency)\n\n return any(\n RosEnv._is_met_via_rosenv(alternative, dependency, rosenv_installed_debs)\n or RosEnv._is_met_via_system(\n alternative,\n dependency,\n )\n for alternative in dependency.names\n )\n\n def _check_for_dependencies(self, installable: Installable) -> None:\n _logger.debug(\"Checking dependencies of %s\", installable.name)\n rosenv_installed_debs = self._get_rosenv_installed_debs()\n\n unmet_dependencies = [\n dependency\n for dependency in self._get_dependencies_of(installable)\n if not RosEnv._is_dependency_met(dependency, rosenv_installed_debs)\n ]\n\n if len(unmet_dependencies) != 0:\n raise UnmetDependencyError(installable.name, unmet_dependencies)\n\n def install(self, installable: Installable, *, overwrite: bool, check_dependencies: bool) -> None:\n package_name = installable.name\n\n if check_dependencies:\n self._check_for_dependencies(installable)\n\n if self.is_installed(package_name):\n if overwrite:\n _logger.info(\"Removing already installed package %s\", package_name)\n self.uninstall(package_name, force=True)\n else:\n _logger.info(\"Skipping already installed package %s\", package_name)\n return\n\n self._handle_package_contents(installable, overwrite=overwrite)\n\n package_file = self._copy(installable)\n _logger.debug(\"Installing package at %s\", str(package_file))\n\n try:\n self.shell.run(f\"/usr/bin/dpkg-deb --extract {package_file!s} {self._install_path!s}\", cwd=Path.cwd())\n except (CommandAbortedError, CommandFailedError):\n package_file.unlink()\n raise\n\n self._settings.add_installed(package_name, package_file)\n\n def uninstall(self, package_name: PackageName, *, force: bool = False) -> None:\n if not self.is_installed(package_name):\n raise PackageIsNotInstalledError(package_name)\n\n if not force:\n dependents = self._get_dependent_packages(package_name)\n if len(dependents) > 0:\n raise RemoveDependencyError(package_name, dependents)\n\n _logger.debug(\"Uninstalling package: %s\", package_name)\n\n contents: dict[str, ArchiveEntry] = inspect_package_contents(\n str(self._settings.installed_packages[package_name]),\n )\n _logger.debug(\"Package Content: %s\", contents)\n\n for package_path in reversed(contents):\n # We need to go bottom-up here, as we maybe empty folders which we\n # can then delete; all packages that I've seen so far had the\n # top-down order so reversed should be bottom-up\n sanitized_path = remove_slash_prefix(package_path)\n installed_file_path = self._install_path / sanitized_path\n\n _logger.debug(\n 
\"Trying to delete: installed_file_path=%s sanitized_path=%s package_path=%s\",\n installed_file_path,\n sanitized_path,\n package_path,\n )\n\n if not installed_file_path.exists():\n _logger.warning(\"File doesn't exist but was installed: %s\", str(installed_file_path))\n continue\n\n if installed_file_path.is_dir():\n try:\n installed_file_path.rmdir()\n except OSError as e:\n if e.errno != errno.ENOTEMPTY:\n # We only expect directories to not be empty, this is another error\n raise\n\n _logger.debug(\"Directory not empty, leaving it: %s\", str(installed_file_path))\n continue\n\n _logger.debug(\"Removing: %s\", installed_file_path)\n installed_file_path.unlink()\n self._settings.remove_installed(package_name)\n", "repo_name": "HRI-EU/rosenv", "sub_path": "src/rosenv/environment/env.py", "file_name": "env.py", "file_ext": "py", "file_size_in_byte": 15759, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 9, "dataset": "github-code", "pt": "73", "api": [{"api_name": "logging.getLogger", "line_number": 45, "usage_type": "call"}, {"api_name": "typing.NewType", "line_number": 49, "usage_type": "call"}, {"api_name": "rosenv.ros_package.package.PackageName", "line_number": 54, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 56, "usage_type": "name"}, {"api_name": "dataclasses.dataclass", "line_number": 52, "usage_type": "call"}, {"api_name": "deb_pkg_tools.deps.AbstractRelationship", "line_number": 60, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 67, "usage_type": "name"}, {"api_name": "deb_pkg_tools.package.inspect_package_contents", "line_number": 68, "usage_type": "call"}, {"api_name": "rosenv.util.paths.remove_slash_prefix", "line_number": 69, "usage_type": "call"}, {"api_name": "functools.lru_cache", "line_number": 66, "usage_type": "name"}, {"api_name": "rosenv.ros_package.package.PackageName", "line_number": 78, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 85, "usage_type": "name"}, {"api_name": "rosenv.ros_package.package.PackageName", "line_number": 85, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 91, "usage_type": "name"}, {"api_name": "rosenv.ros_package.package.PackageName", "line_number": 91, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 91, "usage_type": "name"}, {"api_name": "typing.TypedDict", "line_number": 94, "usage_type": "name"}, {"api_name": "rosenv.ros_package.package.PackageName", "line_number": 95, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 102, "usage_type": "name"}, {"api_name": "rosenv.environment.distro.RosDistribution", "line_number": 104, "usage_type": "name"}, {"api_name": "rosenv.environment.distro.RosDistribution", "line_number": 108, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 111, "usage_type": "name"}, {"api_name": "yaml.safe_load", "line_number": 113, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 114, "usage_type": "call"}, {"api_name": "rosenv.environment.distro.parse_distro", "line_number": 115, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 123, "usage_type": "name"}, {"api_name": "rosenv.environment.distro.RosDistribution", "line_number": 123, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 131, "usage_type": "name"}, {"api_name": "yaml.safe_dump", "line_number": 138, "usage_type": "call"}, {"api_name": "rosenv.ros_package.package.PackageName", "line_number": 140, "usage_type": "name"}, {"api_name": "pathlib.Path", 
"line_number": 140, "usage_type": "name"}, {"api_name": "rosenv.ros_package.package.PackageName", "line_number": 150, "usage_type": "name"}, {"api_name": "rosenv.environment.locate.locate", "line_number": 158, "usage_type": "call"}, {"api_name": "rosenv.environment.shell.RosEnvShell", "line_number": 162, "usage_type": "call"}, {"api_name": "rosenv.rosdep.rosdep.Rosdep", "line_number": 163, "usage_type": "name"}, {"api_name": "rosenv.rosdep.rosdep.Rosdep", "line_number": 168, "usage_type": "call"}, {"api_name": "rosenv.rosdep.rosdep.Rosdep", "line_number": 166, "usage_type": "name"}, {"api_name": "rosenv.environment.distro.RosDistribution", "line_number": 172, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 176, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 180, "usage_type": "name"}, {"api_name": "shutil.copy", "line_number": 187, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 183, "usage_type": "name"}, {"api_name": "rosenv.ros_package.package.PackageName", "line_number": 190, "usage_type": "name"}, {"api_name": "rosenv.ros_package.package.PackageName", "line_number": 193, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 193, "usage_type": "name"}, {"api_name": "rosenv.ros_package.package.PackageName", "line_number": 196, "usage_type": "name"}, {"api_name": "rosenv.rosdep.rosdep.ResolvedPackageName", "line_number": 200, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 200, "usage_type": "name"}, {"api_name": "deb_pkg_tools.package.collect_related_packages", "line_number": 201, "usage_type": "call"}, {"api_name": "rosenv.ros_package.package.PackageName", "line_number": 203, "usage_type": "name"}, {"api_name": "rosenv.util.cancelable_executor.CancelableExecutor", "line_number": 206, "usage_type": "call"}, {"api_name": "rosenv.util.cpu_count.get_cpu_count", "line_number": 206, "usage_type": "call"}, {"api_name": "concurrent.futures.as_completed", "line_number": 212, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 215, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 216, "usage_type": "call"}, {"api_name": "os.readlink", "line_number": 216, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 225, "usage_type": "name"}, {"api_name": "rosenv.util.paths.remove_slash_prefix", "line_number": 226, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 229, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 228, "usage_type": "name"}, {"api_name": "rosenv.ros_package.package.PackageName", "line_number": 228, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 240, "usage_type": "name"}, {"api_name": "rosenv.ros_package.package.PackageName", "line_number": 240, "usage_type": "name"}, {"api_name": "deb_pkg_tools.package.ArchiveEntry", "line_number": 245, "usage_type": "name"}, {"api_name": "deb_pkg_tools.package.inspect_package_contents", "line_number": 245, "usage_type": "call"}, {"api_name": "itertools.chain.from_iterable", "line_number": 272, "usage_type": "call"}, {"api_name": "itertools.chain", "line_number": 272, "usage_type": "name"}, {"api_name": "deb_pkg_tools.deps.parse_depends", "line_number": 273, "usage_type": "call"}, {"api_name": "typing.Iterator", "line_number": 271, "usage_type": "name"}, {"api_name": "deb_pkg_tools.deps.AbstractRelationship", "line_number": 271, "usage_type": "name"}, {"api_name": "deb_pkg_tools.utils.find_installed_version", "line_number": 279, "usage_type": "call"}, 
{"api_name": "deb_pkg_tools.package.parse_filename", "line_number": 284, "usage_type": "call"}, {"api_name": "typing.Mapping", "line_number": 281, "usage_type": "name"}, {"api_name": "deb_pkg_tools.package.PackageFile", "line_number": 281, "usage_type": "name"}, {"api_name": "deb_pkg_tools.deps.AbstractRelationship", "line_number": 290, "usage_type": "name"}, {"api_name": "typing.Mapping", "line_number": 291, "usage_type": "name"}, {"api_name": "deb_pkg_tools.package.PackageFile", "line_number": 291, "usage_type": "name"}, {"api_name": "deb_pkg_tools.deps.AbstractRelationship", "line_number": 301, "usage_type": "name"}, {"api_name": "deb_pkg_tools.deps.AbstractRelationship", "line_number": 309, "usage_type": "name"}, {"api_name": "typing.Mapping", "line_number": 310, "usage_type": "name"}, {"api_name": "deb_pkg_tools.package.PackageFile", "line_number": 310, "usage_type": "name"}, {"api_name": "pathlib.Path.cwd", "line_number": 356, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 356, "usage_type": "name"}, {"api_name": "rosenv.environment.run_command.CommandAbortedError", "line_number": 357, "usage_type": "name"}, {"api_name": "rosenv.environment.run_command.CommandFailedError", "line_number": 357, "usage_type": "name"}, {"api_name": "rosenv.ros_package.package.PackageName", "line_number": 363, "usage_type": "name"}, {"api_name": "deb_pkg_tools.package.ArchiveEntry", "line_number": 374, "usage_type": "name"}, {"api_name": "deb_pkg_tools.package.inspect_package_contents", "line_number": 374, "usage_type": "call"}, {"api_name": "rosenv.util.paths.remove_slash_prefix", "line_number": 383, "usage_type": "call"}, {"api_name": "errno.ENOTEMPTY", "line_number": 401, "usage_type": "attribute"}]}
+{"seq_id": "17505776876", "text": "from tcr.wallet import Wallet\nfrom tcr.cardano import Cardano\nfrom tcr.database import Database\nimport logging\nimport argparse\nimport tcr.command\nimport tcr.nftmint\nimport traceback\nimport json\nimport requests\nimport urllib\nimport PIL.Image\nimport io\nimport os\nimport datetime\nimport shutil\nimport pathlib\n\nMINT_PAYMENT = 10000000\n\npotency_lut = {\n 'low': 1,\n 'medium-low': 2,\n 'medium': 3,\n 'medium-high': 4,\n 'high': 5,\n}\n\n# Generates the mutant image.\n#\n# @param normie_path Path to the initial image for the AI algorithm.\n# @param mutant_path The image generated by the AI algorithm should be saved to\n# this location.\n# @param potency A value ranging from low (1) to high (5). The AI algorithm\n# should produce a more mutated image for higher potency.\n#\n# @return true if successful.\ndef mutate_normie(normie_path: str, mutant_path: str, potency: int) -> bool:\n logger = logging.getLogger('mainnet')\n logger.info('Mutate: potency = {}, input = {}'.format(potency, normie_path))\n\n # Run the AI mutation algorithm\n shutil.copyfile(normie_path, mutant_path)\n\n logger.info('Mutate: output = {}'.format(mutant_path))\n return os.path.exists(mutant_path)\n\n# Process the normies package.\n#\n# Unzips the normies file then processes each request in the file by calling\n# mutate_normie. Saves the output in a sub directory and zips everything\n# together.\n#\n# @param network Just a name for the logger. The Cardano network isn't used in\n# this process.\n# @param normies_file zip file package.\n#\ndef process_normies(network: str, normies_file: str) -> None:\n logger = logging.getLogger(network)\n\n if not os.path.exists('normies'):\n os.mkdir('normies')\n\n if not os.path.exists('mutants'):\n os.mkdir('mutants')\n\n normies_subdir = 'normies/{}'.format(pathlib.Path(normies_file).stem)\n if not os.path.exists(normies_subdir):\n os.mkdir(normies_subdir)\n\n mutants_subdir = 'mutants/{}'.format(pathlib.Path(normies_file).stem)\n if not os.path.exists(mutants_subdir):\n os.mkdir(mutants_subdir)\n\n shutil.unpack_archive(normies_file, normies_subdir)\n\n normies_list = None\n with open(normies_subdir + '/normies.json', 'r') as file:\n normies_list = json.load(file)\n if normies_list == None:\n logger.error('Unable to parse {}'.format(\"normies.json\"))\n raise Exception('Unable to parse {}'.format(\"normies.json\"))\n\n mutants_list = []\n for normie in normies_list:\n normie_image = normies_subdir + '/' + normie['normie-image']\n mutant_image = mutants_subdir + '/' + normie['normie-fingerprint'] + '_mutant.png'\n if not mutate_normie(normie_image, mutant_image, normie['potency']):\n logger.error('Error mutating')\n raise Exception('Error mutating')\n\n mutant = {\n 'from': normie['from'],\n 'tx': normie['tx'],\n 'normie-fingerprint': normie['normie-fingerprint'],\n 'mutation-fingerprint': normie['mutation-fingerprint'],\n 'mutant-image': pathlib.Path(mutant_image).name\n }\n mutants_list.append(mutant)\n\n with open(mutants_subdir + '/mutants.json', 'w') as file:\n file.write(json.dumps(mutants_list, indent=4))\n\n shutil.make_archive(mutants_subdir+'_mutants', 'zip', mutants_subdir)\n logger.info('Mutants Package: {}.zip'.format(mutants_subdir+'_mutants'))\n\n# Creates the normies package.\n#\n# This process requires cardano-node and cardano-db-sync to be running.\n#\n# @param network \"mainnet\" or \"testnet\"\n# @param wallet_name A previously created wallet to search for incoming UTXOs.\n# @param requests_file JSON file of 
# Creates the normies package.\n#\n# This process requires cardano-node and cardano-db-sync to be running.\n#\n# @param network \"mainnet\" or \"testnet\"\n# @param wallet_name A previously created wallet to search for incoming UTXOs.\n# @param requests_file JSON file of requests\ndef process_requests(network: str, wallet_name: str, requests_file: str) -> None:\n    if network not in tcr.command.networks:\n        raise Exception('Invalid Network: {}'.format(network))\n\n    logger = logging.getLogger(network)\n\n    # Open the whitelist to make sure only approved projects are mutated.\n    mutate_whitelist = {}\n    with open('mutate_whitelist.json', 'r') as file:\n        mutate_whitelist = json.load(file)\n    if mutate_whitelist is None:\n        logger.error('Unable to parse {}'.format(\"mutate_whitelist.json\"))\n        raise Exception('Unable to parse {}'.format(\"mutate_whitelist.json\"))\n\n    # Open the wallet to monitor for incoming payments and initialize the\n    # payment address if necessary\n    wallet = Wallet(wallet_name, network)\n    if not wallet.exists():\n        logger.error('Wallet: <{}> does not exist'.format(wallet_name))\n        raise Exception('Wallet: <{}> does not exist'.format(wallet_name))\n\n    addr_index = Wallet.ADDRESS_INDEX_MUTATE_REQUEST\n    if wallet.get_payment_address(addr_index) is None:\n        wallet.setup_address(addr_index)\n\n    # General setup\n    cardano = Cardano(network, '{}_protocol_parameters.json'.format(network))\n\n    tip = cardano.query_tip()\n    cardano.query_protocol_parameters()\n    tip_slot = tip['slot']\n\n    database = Database('{}.ini'.format(network))\n    database.open()\n    latest_slot = database.query_latest_slot()\n    sync_progress = database.query_sync_progress()\n    logger.info('Cardano Node Tip Slot: {}'.format(tip_slot))\n    logger.info(' Database Latest Slot: {}'.format(latest_slot))\n    logger.info('Sync Progress: {}'.format(sync_progress))\n\n    # Open all the mutation requests\n    requests = None\n    with open(requests_file, 'r') as file:\n        requests = json.load(file)\n    if requests is None:\n        logger.error('Unable to parse {}'.format(requests_file))\n        raise Exception('Unable to parse {}'.format(requests_file))\n\n    # Populate UTXOs with the address of the sender and stake address of the\n    # sender\n    (utxos, total_lovelace) = cardano.query_utxos(wallet,\n                                                  [wallet.get_payment_address(addr_index, delegated=True),\n                                                   wallet.get_payment_address(addr_index, delegated=False)])\n    for utxo in utxos:\n        inputs = database.query_utxo_inputs(utxo['tx-hash'])\n        utxo['from'] = inputs[0]['address']\n        utxo['from_stake'] = database.query_stake_address(utxo['from'])\n\n    # Setup directories for output files\n    if not os.path.exists('normie_pkg'):\n        os.mkdir('normie_pkg')\n\n    subdir = 'normie_pkg/{}'.format(datetime.datetime.today().strftime('%Y_%m_%d'))\n    if not os.path.exists(subdir):\n        os.mkdir(subdir)\n\n    normies_pkg = []\n    # Process the request and build the mutation package\n    logger.info('Mutation Address: {}'.format(wallet.get_payment_address(addr_index)))\n    for r in requests['requests']:\n        logger.info('Process: {}: {}/{}'.format(r['name'], r['normie'], r['mutation']))\n        normie_owner = database.query_owner_by_fingerprint(r['normie'])\n        mutation_owner = database.query_owner_by_fingerprint(r['mutation'])\n\n        if normie_owner != mutation_owner:\n            logger.error('Owner mismatch for {}: {} != {}'.format(r['name'], r['normie'], r['mutation']))\n            continue\n\n        (normie_policy, normie_md) = database.query_nft_metadata(r['normie'])\n        (mutation_policy, mutation_md) = database.query_nft_metadata(r['mutation'])\n\n        if mutation_policy != '7135025a3c23035cdcff4ef8ae3849248afd369466ea1abef61a4157':\n            logger.error('Invalid mutation policy: {}'.format(mutation_policy))\n            continue\n\n        if normie_policy not in mutate_whitelist:\n            logger.error('Unapproved normie policy: {}'.format(normie_policy))\n            continue\n\n        # search for a payment that matches the request\n
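        # Note (added): ownership is matched via the sender's stake address\n        # (from_stake), so the payment may come from any payment address in the\n        # NFT owner's wallet.\n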
        payment = None\n        for utxo in utxos:\n            if utxo['from_stake'] == normie_owner:\n                payment = utxo\n                break\n\n        if payment is None:\n            logger.error('No payment found')\n            continue\n\n        # remove this one from the list so it doesn't get processed more than\n        # once\n        utxos.remove(payment)\n\n        if payment['amount'] != MINT_PAYMENT or len(payment['assets']) != 0:\n            logger.error('Invalid payment: {} / {}'.format(payment['amount'], payment['assets']))\n            continue\n\n        cid = normie_md['image'][7:]\n        #download_url = 'https://ipfs.io/ipfs/{}'.format(cid)\n        download_url = 'https://infura-ipfs.io/ipfs/{}'.format(cid)\n        logger.info('Download Normie: {}'.format(download_url))\n\n        fd = urllib.request.urlopen(download_url)\n        if fd.status != 200:\n            logger.info('HTTP Error: {}'.format(fd.status))\n            continue\n\n        image_file = io.BytesIO(fd.read())\n        im = PIL.Image.open(image_file)\n        im.save(subdir + '/' + r['normie'] + '.png', format='png')\n        normie = {\n            'from': payment['from'],\n            'tx': '{}:{}'.format(payment['tx-hash'], payment['tx-ix']),\n            'potency': potency_lut[mutation_md['potency']],\n            'normie-image': r['normie']+'.png',\n            'normie-fingerprint': r['normie'],\n            'mutation-fingerprint': r['mutation']\n        }\n        normies_pkg.append(normie)\n\n    with open('{}/normies.json'.format(subdir), 'w') as f:\n        f.write(json.dumps(normies_pkg, indent=4))\n\n    shutil.make_archive(subdir, 'zip', subdir)\n    logger.info('Normies Package: {}.zip'.format(subdir))\n\ndef main():\n    parser = argparse.ArgumentParser(add_help=False)\n    parser.add_argument('--requests', required=False,\n                        action='store',\n                        type=str,\n                        metavar='FILE',\n                        help='Filename of mutation request JSON.')\n    parser.add_argument('--wallet', required=False,\n                        action='store',\n                        type=str,\n                        metavar='NAME',\n                        help='Wallet name to check payments.')\n    parser.add_argument('--normies', required=False,\n                        action='store',\n                        type=str,\n                        metavar='FILE',\n                        help='The output file from --requests / input for AI algorithm')\n\n    network = 'mainnet'\n    tcr.nftmint.setup_logging(network, 'mutate')\n\n    args = parser.parse_args()\n    if args.requests is not None:\n        if args.wallet is None:\n            raise Exception('--wallet required with --requests')\n        process_requests(network, args.wallet, args.requests)\n    elif args.normies is not None:\n        process_normies(network, args.normies)\n    else:\n        raise Exception('--requests or --normies required')\n\nif __name__ == '__main__':\n    try:\n        main()\n    except Exception as e:\n        print('')\n        print('')\n        print('EXCEPTION: {}'.format(e))\n        print('')\n        traceback.print_exc()\n", "repo_name": "kris-76/thecardroom", "sub_path": "tcr/mutate.py", "file_name": "mutate.py", "file_ext": "py", "file_size_in_byte": 10947, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 7, "dataset": "github-code", "pt": "73", "api": [{"api_name": "logging.getLogger", "line_number": 39, "usage_type": "call"}, {"api_name": "shutil.copyfile", "line_number": 43, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 46, "usage_type": "call"}, {"api_name": "os.path", "line_number": 46, "usage_type": "attribute"}, {"api_name": "logging.getLogger", "line_number": 59, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 61, "usage_type": "call"}, {"api_name": "os.path", "line_number": 61, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 62, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 64, "usage_type": "call"}, {"api_name": "os.path", "line_number": 64, "usage_type": "attribute"}, {"api_name": "os.mkdir", 
"line_number": 65, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 67, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 68, "usage_type": "call"}, {"api_name": "os.path", "line_number": 68, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 69, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 71, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 72, "usage_type": "call"}, {"api_name": "os.path", "line_number": 72, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 73, "usage_type": "call"}, {"api_name": "shutil.unpack_archive", "line_number": 75, "usage_type": "call"}, {"api_name": "json.load", "line_number": 79, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 97, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 102, "usage_type": "call"}, {"api_name": "shutil.make_archive", "line_number": 104, "usage_type": "call"}, {"api_name": "tcr.wallet.command", "line_number": 115, "usage_type": "attribute"}, {"api_name": "tcr.wallet", "line_number": 115, "usage_type": "name"}, {"api_name": "json.load", "line_number": 121, "usage_type": "call"}, {"api_name": "tcr.wallet.Wallet", "line_number": 128, "usage_type": "call"}, {"api_name": "tcr.wallet.Wallet.ADDRESS_INDEX_MUTATE_REQUEST", "line_number": 133, "usage_type": "attribute"}, {"api_name": "tcr.wallet.Wallet", "line_number": 133, "usage_type": "name"}, {"api_name": "logging.getLogger", "line_number": 138, "usage_type": "call"}, {"api_name": "tcr.cardano.Cardano", "line_number": 139, "usage_type": "call"}, {"api_name": "tcr.database.Database", "line_number": 145, "usage_type": "call"}, {"api_name": "json.load", "line_number": 156, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 172, "usage_type": "call"}, {"api_name": "os.path", "line_number": 172, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 173, "usage_type": "call"}, {"api_name": "datetime.datetime.today", "line_number": 175, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 175, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 176, "usage_type": "call"}, {"api_name": "os.path", "line_number": 176, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 177, "usage_type": "call"}, {"api_name": "urllib.request.urlopen", "line_number": 226, "usage_type": "call"}, {"api_name": "urllib.request", "line_number": 226, "usage_type": "attribute"}, {"api_name": "io.BytesIO", "line_number": 231, "usage_type": "call"}, {"api_name": "PIL.Image.Image.open", "line_number": 232, "usage_type": "call"}, {"api_name": "PIL.Image.Image", "line_number": 232, "usage_type": "attribute"}, {"api_name": "PIL.Image", "line_number": 232, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 245, "usage_type": "call"}, {"api_name": "shutil.make_archive", "line_number": 247, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 251, "usage_type": "call"}, {"api_name": "tcr.wallet.nftmint.setup_logging", "line_number": 269, "usage_type": "call"}, {"api_name": "tcr.wallet.nftmint", "line_number": 269, "usage_type": "attribute"}, {"api_name": "tcr.wallet", "line_number": 269, "usage_type": "name"}, {"api_name": "traceback.print_exc", "line_number": 289, "usage_type": "call"}]}
+{"seq_id": "27405151728", "text": "\"\"\"\nCheck the equivalence of two system of equations.\n\nUsage:\n equiv [-q...] [-1] [-y SYMMETRIES] [-p] A B\n\nOptions:\n -q, --quiet Less output, result is signaled via return value\n -1, --one-way Check only if A implies B, not the other way round\n -y SYM, --symmetry SYM Symmetry group generators\n -p, --pretty Pretty print missing inequalities\n\nThe program's return code signifies whether the two systems are equivalent:\n\n 0 A and B are equivalent\n 1 A does not imply B\n 2 B does not imply A\n 3 neither A implies B nor B implies A\n\nThe values 2 and 3 are only used if --one-way is not in effect.\n\"\"\"\n\nimport sys\n\nfrom docopt import docopt\n\nfrom .core.io import format_ineq, System\nfrom .core.symmetry import group_by_symmetry\n\n\ndef check_implies(sys_a: System, sys_b: System,\n name_a: str, name_b: str,\n *, symmetries: 'SymmetryGroup', quiet=0, pretty=False):\n \"\"\"\n Check if A implies B (system of linear inequalities).\n\n The amount of output is controlled by the value of ``quiet``:\n\n quiet=0 Full output, including the list of missing constraints\n quiet=1 Short output, no list of constraints\n quiet=2 No output at all\n \"\"\"\n lp = sys_a.lp()\n # take one representative from each category:\n groups = group_by_symmetry(symmetries, sys_b.matrix)\n missing = [g for g in groups if not lp.implies(g[0])]\n if missing:\n if quiet <= 1:\n print(\"{} misses {} ({} intrinsic) constraints of {}!\".format(\n name_a, sum(map(len, missing)), len(missing), name_b))\n if quiet == 0:\n print(\"{} misses the following inequalities of {}:\"\n .format(name_a, name_b))\n for constr in missing:\n print(format_ineq(constr[0], pretty, sys_a.columns))\n return False\n else:\n if quiet <= 1:\n print(\"{} implies {}!\".format(name_a, name_b))\n return True\n\n\ndef main(args=None):\n opts = docopt(__doc__)\n sys_a = System.load(opts['A'])\n sys_b = System.load(opts['B'])\n sys_b, _ = sys_b.slice(sys_a.columns, fill=True)\n\n sys_a.update_symmetries(opts['--symmetry'])\n\n status = 0\n kwd = {'quiet': opts['--quiet'],\n 'pretty': opts['--pretty'],\n 'symmetries': sys_a.symmetry_group()}\n if not check_implies(sys_a, sys_b, 'A', 'B', **kwd):\n status |= 1\n if not opts['--one-way']:\n if not check_implies(sys_b, sys_a, 'B', 'A', **kwd):\n status |= 2\n return status\n\n\nif __name__ == '__main__':\n sys.exit(main())\n", "repo_name": "coldfix/pystif", "sub_path": "pystif/equiv.py", "file_name": "equiv.py", "file_ext": "py", "file_size_in_byte": 2649, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 4, "dataset": "github-code", "pt": "73", "api": [{"api_name": "core.io.System", "line_number": 31, "usage_type": "name"}, {"api_name": "core.symmetry.group_by_symmetry", "line_number": 45, "usage_type": "call"}, {"api_name": "core.io.format_ineq", "line_number": 55, "usage_type": "call"}, {"api_name": "docopt.docopt", "line_number": 64, "usage_type": "call"}, {"api_name": "core.io.System.load", "line_number": 65, "usage_type": "call"}, {"api_name": "core.io.System", "line_number": 65, "usage_type": "name"}, {"api_name": "core.io.System.load", "line_number": 66, "usage_type": "call"}, {"api_name": "core.io.System", "line_number": 66, "usage_type": "name"}, {"api_name": "sys.exit", "line_number": 84, "usage_type": "call"}]}
+{"seq_id": "20075859288", "text": "import PySimpleGUI as sg\nimport matplotlib.pyplot as plt\nfrom matplotlib.backends.backend_tkagg import FigureCanvasTkAgg\nfrom matplotlib.gridspec import GridSpec\n\ndef create_plot(updated_data):\n income = 0\n expense = 0\n income_category_name = []\n for i in updated_data:\n if i[0] == 'Income':\n income_category_name.append(i[1])\n income_category_name = list(set(income_category_name))\n income_category_value = []\n for i in income_category_name:\n income_category_value.append(0)\n\n expense_category_name = []\n for i in updated_data:\n if i[0] == 'Expense':\n expense_category_name.append(i[1])\n expense_category_name = list(set(expense_category_name))\n expense_category_value = []\n for i in expense_category_name:\n expense_category_value.append(0)\n for i in updated_data:\n if i[0] == 'Income':\n income += int(i[2])\n income_category_value[0] += int(i[2])\n else:\n expense += int(i[2])\n expense_category_value[expense_category_name.index(i[1])] += int(i[2])\n if income_category_value == [] and expense_category_value == []:\n return 0\n colors = ['#8ceacd', '#d95c68']\n fig = plt.figure(constrained_layout=True)\n fig.suptitle(\"Monthly report\")\n gs = GridSpec(2, 2, figure=fig)\n axs = fig.add_subplot(gs[:, 0])\n axs2 = fig.add_subplot(gs[0, 1])\n axs3 = fig.add_subplot(gs[1, 1])\n axs.pie([income, expense], colors=colors, labels=['Income', 'Expense'], autopct='%1.2f%%')\n axs2.pie(income_category_value, labels=income_category_name, autopct='%1.2f%%')\n axs3.pie(expense_category_value, labels=expense_category_name, autopct='%1.2f%%')\n\n layout = [\n [sg.Canvas(size=(1500, 1000), key='-CANVAS-')]\n ]\n window = sg.Window('Monthly summary', layout, finalize=True, element_justification='center')\n figure_canvas = FigureCanvasTkAgg(fig, window['-CANVAS-'].TKCanvas)\n figure_canvas.draw()\n figure_canvas.get_tk_widget().pack(side='top', fill='none', expand=1)\n\n while True:\n event, values = window.read()\n if event == 'Exit' or event == sg.WIN_CLOSED or event == 'Cancel':\n break\n window.close()\n", "repo_name": "Napasakon/dads5001_quiz_1", "sub_path": "show_graph.py", "file_name": "show_graph.py", "file_ext": "py", "file_size_in_byte": 2222, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "73", "api": [{"api_name": "matplotlib.pyplot.figure", "line_number": 36, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 36, "usage_type": "name"}, {"api_name": "matplotlib.gridspec.GridSpec", "line_number": 38, "usage_type": "call"}, {"api_name": "PySimpleGUI.Canvas", "line_number": 47, "usage_type": "call"}, {"api_name": "PySimpleGUI.Window", "line_number": 49, "usage_type": "call"}, {"api_name": "matplotlib.backends.backend_tkagg.FigureCanvasTkAgg", "line_number": 50, "usage_type": "call"}, {"api_name": "PySimpleGUI.WIN_CLOSED", "line_number": 56, "usage_type": "attribute"}]}
+{"seq_id": "29215712564", "text": "from PyQt5.QtWidgets import QApplication, QWidget, QGridLayout, QHBoxLayout, QPushButton, QFileDialog\nimport sys\n\n\nclass ClassificationAI(QWidget):\n def __init__(self):\n super().__init__()\n self.setWindowTitle('제목')\n\n self.button1 = QPushButton('open_file')\n self.button1.clicked.connect(self.button1_click)\n\n self.hbox_layout = QHBoxLayout()\n self.hbox_layout.addWidget(self.button1)\n\n self.main_layout = QGridLayout()\n self.main_layout.addLayout(self.hbox_layout, 0, 0, 1, 1)\n\n self.setLayout(self.main_layout)\n\n def button1_click(self):\n # getOpenFileName의 속성들 : self, 창 제목, 초기 이미지 폴더 지정, 필터링(선택 가능한 파일 확장자 지정)\n path, _ = QFileDialog.getOpenFileName(self, '제목', '.', 'Image File (*.*)') # path, _ 둘 다 변수임\n if path == '':\n print('취소')\n else:\n print('PATH : ', path)\n\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n classification_ai = ClassificationAI()\n classification_ai.show()\n sys.exit(app.exec())", "repo_name": "Seojun1/Object-Detection", "sub_path": "lab_gui/09. file_dialog.py", "file_name": "09. file_dialog.py", "file_ext": "py", "file_size_in_byte": 1136, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "73", "api": [{"api_name": "PyQt5.QtWidgets.QWidget", "line_number": 5, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QPushButton", "line_number": 10, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QHBoxLayout", "line_number": 13, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QGridLayout", "line_number": 16, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QFileDialog.getOpenFileName", "line_number": 23, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QFileDialog", "line_number": 23, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QApplication", "line_number": 30, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 30, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 33, "usage_type": "call"}]}
+{"seq_id": "5683164926", "text": "from django.db import models\n\n# Create your models here.\n\nclass Shift(models.Model):\n\n months = (\n ('january','January'),\n ('february','Jebruary'),\n ('march','March'),\n ('april','April'),\n ('may','May'),\n ('june','June'),\n ('july','July'),\n ('august','August'),\n ('september','September'),\n ('october','October'),\n ('november','November'),\n ('december','December')\n )\n\n from employee_management_system.models import Employee\n s_year = models.CharField(max_length=4 , null=True)\n s_date = models.DateField(null=False)\n s_start = models.IntegerField(null=False)\n s_end = models.IntegerField(null=False)\n s_month = models.CharField(choices= months , max_length= 200 , null=True)\n s_employee_shift = models.ForeignKey(Employee, on_delete=models.DO_NOTHING , default=None)", "repo_name": "lasith98/wecare", "sub_path": "hrms/shift_management_system/models.py", "file_name": "models.py", "file_ext": "py", "file_size_in_byte": 878, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "73", "api": [{"api_name": "django.db.models.Model", "line_number": 5, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 5, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 23, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 23, "usage_type": "name"}, {"api_name": "django.db.models.DateField", "line_number": 24, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 24, "usage_type": "name"}, {"api_name": "django.db.models.IntegerField", "line_number": 25, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 25, "usage_type": "name"}, {"api_name": "django.db.models.IntegerField", "line_number": 26, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 26, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 27, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 27, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 28, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 28, "usage_type": "name"}, {"api_name": "employee_management_system.models.Employee", "line_number": 28, "usage_type": "name"}, {"api_name": "django.db.models.DO_NOTHING", "line_number": 28, "usage_type": "attribute"}]}
+{"seq_id": "9946115454", "text": "\"\"\"Training and evaluating the classification model.\"\"\"\n# pyright: reportMissingImports=false\n# pylint: disable=E0401\nimport joblib\nimport typer\nfrom commitcanvas_models.train_model import model as md\nfrom reporover import reporover\n\napp = typer.Typer()\n\n\n@app.callback()\ndef callback():\n \"\"\"Please see the documentation for acceptable command line options.\"\"\"\n\n\nTYPES = \"chore,docs,feat,fix,refactor,test\"\n\n\n@app.command()\ndef train(url: str, save: str, types: str = TYPES):\n \"\"\"Train the model for project specific mode.\"\"\"\n collected_data = reporover.collect(url)\n\n data = md.data_prep(collected_data, types)\n\n train_features, train_labels = md.feature_label_split(data)\n\n pipeline = md.build_pipline()\n pipeline = pipeline.fit(train_features, train_labels)\n\n print(\"saving the model\")\n joblib.dump(pipeline, \"{}/trained_model.pkl\".format(save))\n print(\"saving model complete\")\n", "repo_name": "CommittedTeam/CommitCanvas", "sub_path": "commitcanvas/commit_label/train_model.py", "file_name": "train_model.py", "file_ext": "py", "file_size_in_byte": 911, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 11, "dataset": "github-code", "pt": "73", "api": [{"api_name": "typer.Typer", "line_number": 9, "usage_type": "call"}, {"api_name": "reporover.reporover.collect", "line_number": 23, "usage_type": "call"}, {"api_name": "reporover.reporover", "line_number": 23, "usage_type": "name"}, {"api_name": "commitcanvas_models.train_model.model.data_prep", "line_number": 25, "usage_type": "call"}, {"api_name": "commitcanvas_models.train_model.model", "line_number": 25, "usage_type": "name"}, {"api_name": "commitcanvas_models.train_model.model.feature_label_split", "line_number": 27, "usage_type": "call"}, {"api_name": "commitcanvas_models.train_model.model", "line_number": 27, "usage_type": "name"}, {"api_name": "commitcanvas_models.train_model.model.build_pipline", "line_number": 29, "usage_type": "call"}, {"api_name": "commitcanvas_models.train_model.model", "line_number": 29, "usage_type": "name"}, {"api_name": "joblib.dump", "line_number": 33, "usage_type": "call"}]}
+{"seq_id": "27530755745", "text": "from flask import Flask, render_template, request, session, redirect, url_for, flash\nfrom flask_wtf import CSRFProtect\nfrom sqlalchemy import or_\n\nimport forms\nfrom db import *\nfrom models import *\nfrom helper import date_format\n\napp = Flask(__name__)\napp.config['SECRET_KEY'] = 'super_secret_key'\n#csrf = CSRFProtect(app)\nusersesion = False\nformSearch = forms.FrmSearch()\n\n\ndef is_in_session():\n if 'username' in session:\n return True\n else:\n return False\n\n\ndef create_session(username='', user_id=''):\n session['username'] = username\n session['user_id'] = user_id\n\n\n@app.errorhandler(404)\ndef page_not_found(e):\n return render_template('404.html'), 404\n\n\n@app.before_request\ndef before_request():\n\n if 'username' not in session and request.endpoint in ['new_post', 'comics', 'series', 'anime', 'games', 'tecnologia']:\n return redirect(url_for('login'))\n elif 'username' in session and request.endpoint in ['login', 'signup']:\n return redirect(url_for('index'))\n\n\n@app.after_request\ndef after_request(response):\n return response\n\n\n@app.route('/', methods=['GET', 'POST'])\ndef index(): # put application's code here\n title = \"GeekZone\"\n usersesion = is_in_session()\n\n if usersesion == True:\n username = session['username']\n else:\n username = ''\n\n return render_template('index.html', title=title, usersesion=usersesion, username=username, form=formSearch)\n\n\n@app.route('/login', methods=['GET', 'POST'])\ndef login():\n form = forms.FrmLogin(request.form)\n\n if request.method == 'POST' and form.validate():\n username = form.username.data\n password = form.password.data\n\n user = Usuario.query.filter_by(user=username).first()\n if user is not None and user.verify_password(password):\n session['username'] = username\n session['user_id'] = user.id_usuario\n return redirect(url_for('index'))\n else:\n error_message = 'Usuario o contraseña incorrectos'\n flash(error_message)\n\n return render_template('login.html', form=form)\n\n\n@app.route('/logout')\ndef logout():\n if 'username' in session:\n session.pop('username')\n return redirect(url_for('index'))\n\n\n@app.route('/signup', methods=['GET', 'POST'])\ndef signup():\n form = forms.FrmSignup(request.form)\n\n if request.method == 'POST' and form.validate():\n usuarios(form)\n username = form.username.data\n user = Usuario.query.filter_by(user=username).first()\n session['username'] = username\n session['user_id'] = user.id_usuario\n return render_template('index.html', form=form)\n\n return render_template('signup.html', form=form)\n\n\n@app.route('/password_reset', methods=['GET', 'POST'])\ndef password_reset():\n form = forms.FrmPasswordReset(request.form)\n\n if request.method == 'POST' and form.validate():\n print(\"Formulario válido\")\n print(form.email.data)\n else:\n print(\"Formulario inválido\")\n\n return render_template('password_reset.html', form=form)\n\n\n@app.route('/profile', methods=['GET', 'POST'])\ndef profile():\n return render_template('profile.html')\n\n\n@app.route('/new-post', methods=['GET', 'POST'])\ndef new_post():\n form = forms.FrmNewPost(request.form)\n\n if request.method == 'POST' and form.validate():\n id_usuario = session['user_id']\n publicaciones(form, id_usuario=id_usuario)\n return render_template('index.html', form=form)\n\n return render_template('new_post.html', form=form)\n\n\n@app.route('/games', methods=['GET'])\ndef games():\n title = \"Juegos\"\n posts = Publicacion.query.join(Usuario, 
Categoria).filter_by(id_categoria=1).add_columns(\n        Usuario.user, Publicacion.titulo, Publicacion.fechaPublicacion, Categoria.nombre, Publicacion.content,\n        Publicacion.pictures, Publicacion.topic)\n    return render_template('games.html', title=title, date_format=date_format, posts=posts, usersesion=True,\n                           form=formSearch, username=session['username'])\n\n\n@app.route('/anime', methods=['GET'])\ndef anime():\n    title = \"Anime\"\n    posts = Publicacion.query.join(Usuario, Categoria).filter_by(id_categoria=2).add_columns(\n        Usuario.user, Publicacion.titulo, Publicacion.fechaPublicacion, Categoria.nombre, Publicacion.content,\n        Publicacion.pictures, Publicacion.topic)\n    return render_template('anime.html', title=title, date_format=date_format, posts=posts, form=formSearch,\n                           usersesion=True, username=session['username'])\n\n\n@app.route('/comics/', methods=['GET'])\n@app.route('/comics/<int:page>', methods=['GET'])\ndef comics(page=1):\n    per_page = 10\n    title = \"Comics\"\n    posts = Publicacion.query.join(Usuario, Categoria).filter_by(id_categoria=3).add_columns(\n        Usuario.user, Publicacion.titulo, Publicacion.fechaPublicacion, Categoria.nombre, Publicacion.content,\n        Publicacion.pictures, Publicacion.topic)\n    return render_template('comics.html', title=title, posts=posts, date_format=date_format,\n                           usersesion=True, form=formSearch, username=session['username'])\n\n\n@app.route('/series', methods=['GET'])\ndef series():\n    title = \"Series\"\n    posts = Publicacion.query.join(Usuario, Categoria).filter_by(id_categoria=4).add_columns(\n        Usuario.user, Publicacion.titulo, Publicacion.fechaPublicacion, Categoria.nombre, Publicacion.content,\n        Publicacion.pictures, Publicacion.topic)\n    return render_template('series.html', title=title, date_format=date_format, posts=posts,\n                           usersesion=True, form=formSearch, username=session['username'])\n\n\n@app.route('/tecnologia', methods=['GET'])\ndef tecnologia():\n    title = \"Tecnologia\"\n    posts = Publicacion.query.join(Usuario, Categoria).filter_by(id_categoria=5).add_columns(\n        Usuario.user, Publicacion.titulo, Publicacion.fechaPublicacion, Categoria.nombre, Publicacion.content,\n        Publicacion.pictures, Publicacion.topic)\n    return render_template('tecnologia.html', title=title, date_format=date_format, posts=posts,\n                           usersesion=True, form=formSearch, username=session['username'])\n\n\n# Route to search the database\n@app.route('/search', methods=['GET', 'POST'])\ndef search():\n    title = \"GeekZone\"\n    form = forms.FrmSearch(request.form)\n\n    if request.method == 'POST' and form.validate():\n        search = form.search.data\n        posts = Publicacion.query.join(Usuario, Categoria).filter(or_(Publicacion.titulo.like('%' + search + '%'),\n                                                                      Publicacion.content.like('%' + search + '%'),\n                                                                      Usuario.user.like('%' + search + '%'),\n                                                                      Publicacion.topic.like('%' + search + '%'))).add_columns(\n            Usuario.user, Publicacion.titulo, Publicacion.fechaPublicacion, Categoria.nombre, Publicacion.content,\n            Publicacion.pictures, Publicacion.topic)\n        return render_template('search.html', title=title, posts=posts, date_format=date_format,\n                               usersesion=True, form=form, username=session['username'])\n\n    # On GET (or an invalid form) fall back to the home page instead of\n    # returning None, which would raise a 500 error\n    return redirect(url_for('index'))\n\n\nif __name__ == '__main__':\n    #csrf.init_app(app)  # initialize CSRF protection\n\n    app.run()\n", "repo_name": "Murruco/Geekzone", "sub_path": "app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 7445, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "73", "api": [{"api_name": "flask.Flask", "line_number": 10, "usage_type": 
"call"}, {"api_name": "forms.FrmSearch", "line_number": 14, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 18, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 25, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 26, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 31, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 37, "usage_type": "name"}, {"api_name": "flask.request.endpoint", "line_number": 37, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 37, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 38, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 38, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 39, "usage_type": "name"}, {"api_name": "flask.request.endpoint", "line_number": 39, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 39, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 40, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 40, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 54, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 58, "usage_type": "call"}, {"api_name": "forms.FrmLogin", "line_number": 63, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 63, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 63, "usage_type": "name"}, {"api_name": "flask.request.method", "line_number": 65, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 65, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 71, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 72, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 73, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 73, "usage_type": "call"}, {"api_name": "flask.flash", "line_number": 76, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 78, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 83, "usage_type": "name"}, {"api_name": "flask.session.pop", "line_number": 84, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 84, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 85, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 85, "usage_type": "call"}, {"api_name": "forms.FrmSignup", "line_number": 90, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 90, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 90, "usage_type": "name"}, {"api_name": "flask.request.method", "line_number": 92, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 92, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 96, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 97, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 98, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 100, "usage_type": "call"}, {"api_name": "forms.FrmPasswordReset", "line_number": 105, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 105, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 105, "usage_type": "name"}, {"api_name": "flask.request.method", "line_number": 107, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 107, "usage_type": "name"}, 
{"api_name": "flask.render_template", "line_number": 113, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 118, "usage_type": "call"}, {"api_name": "forms.FrmNewPost", "line_number": 123, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 123, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 123, "usage_type": "name"}, {"api_name": "flask.request.method", "line_number": 125, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 125, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 126, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 128, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 130, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 139, "usage_type": "call"}, {"api_name": "helper.date_format", "line_number": 139, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 140, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 149, "usage_type": "call"}, {"api_name": "helper.date_format", "line_number": 149, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 150, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 161, "usage_type": "call"}, {"api_name": "helper.date_format", "line_number": 161, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 162, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 171, "usage_type": "call"}, {"api_name": "helper.date_format", "line_number": 171, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 172, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 181, "usage_type": "call"}, {"api_name": "helper.date_format", "line_number": 181, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 182, "usage_type": "name"}, {"api_name": "forms.FrmSearch", "line_number": 189, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 189, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 189, "usage_type": "name"}, {"api_name": "flask.request.method", "line_number": 191, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 191, "usage_type": "name"}, {"api_name": "sqlalchemy.or_", "line_number": 193, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 200, "usage_type": "call"}, {"api_name": "helper.date_format", "line_number": 200, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 201, "usage_type": "name"}]}
+{"seq_id": "70462523437", "text": "import numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.animation as animation\n\n\nclass Plotter:\n\n fig = plt.figure()\n\n\n def __init__(self, width, height,\n boundary=\"A\", bounds=[-2, 2, -2, 2],\n interval=100, type=\"o\", fill='b', environment='none', title=\"The Nature of Code\"):\n \"\"\"\n\n :param width:\n :param height:\n :param boundary:\n :param bounds:\n :param interval:\n :param type:\n :param fill:\n :param environment:\n :param title:\n :return: Plotter object\n \"\"\"\n\n self.w = width\n self.h = height\n self.type = type\n self.boundary = boundary\n self.interval = interval\n self.bounds = bounds\n self.size = 0\n self.fill = fill\n self.xdata, self.ydata = [], []\n self.points = []\n self.press = None\n self.environment = environment\n self.initialize()\n\n def initialize(self):\n\n xlim, ylim = self.boundaries(self.boundary)\n\n if self.type in ['o', 's', 'p', '*', '+', 'x', 'd', '|', '_', '-', '--', '-.', ':', '.']:\n\n # self.fig_circle.subplots_adjust(left=0, right=1, bottom=0, top=1)\n self.ax = Plotter.fig.add_subplot(111, aspect='equal', autoscale_on=False, xlim=xlim, ylim=ylim)\n\n # particles holds the locations of the particles\n self.objects, = self.ax.plot([], [], '{}{}'.format(self.fill, self.type), ms=6)\n\n # rect is the box edge\n self.rect = plt.Rectangle(self.bounds[::2],\n self.bounds[1] - self.bounds[0],\n self.bounds[3] - self.bounds[2],\n ec='none', lw=2, fc='none')\n\n if self.environment != \"none\":\n pass #todo\n\n def boundaries(self, quad):\n xlim = ylim = (0, 0)\n if \"A\" in quad and \"B\" in quad and \"C\" in quad and \"D\" in quad: # this is all quadrant\n xlim = (-self.w, self.w)\n ylim = (-self.h, self.h)\n\n elif \"A\" in quad and \"B\" in quad and (\"C\" in quad or \"D\" in quad): # this is not possible hence making all quadrant\n xlim = (-self.w, self.w)\n ylim = (-self.h, self.h)\n\n elif \"A\" in quad and \"B\" in quad: # this is first and second quadrant\n xlim = (-self.w, self.w)\n ylim = (0, self.h)\n\n elif \"A\" in quad and \"D\" in quad: # this is first and fourth quadrant\n xlim = (0, self.w)\n ylim = (-self.h, self.h)\n\n elif \"A\" in quad: # this is first quadrant only\n xlim = (0, self.w)\n ylim = (0, self.h)\n\n elif \"B\" in quad: # this is second quadrant only\n xlim = (-self.w, 0)\n ylim = (0, self.h)\n\n elif \"C\" in quad: # this is third quadrant only\n xlim = (-self.w, 0)\n ylim = (-self.h, 0)\n\n elif \"D\" in quad: # this is fourth quadrant only\n xlim = (0, self.w)\n ylim = (-self.h, 0)\n\n return xlim, ylim\n\n def init(self):\n\n del self.xdata[:]\n del self.ydata[:]\n self.objects.set_data([], [])\n self.rect.set_edgecolor('green')\n return self.objects, self.rect\n\n def animate(self, num, data, object):\n\n points = self.get_data()\n self.xdata.append(points[:, 0].tolist())\n self.ydata.append(points[:, 1].tolist())\n\n print('num = {} data = {} line = {}'.format(num, data, object))\n # print('x={} y={}'.format(self.xdata,self.ydata))\n # print('x ={} y ={}'.format(self.x,self.y))\n # print('t ={} y ={}'.format(points[:,0],points[:,1]))\n\n xmin, xmax = self.ax.get_xlim()\n vec = points[:, 0]\n if vec[vec > xmax]:\n self.ax.set_xlim(xmin, 2 * xmax)\n self.ax.figure.canvas.draw()\n\n ms = int(self.fig.dpi * 2 * self.size * self.fig.get_figwidth()\n / np.diff(self.ax.get_xbound())[0])\n\n # update pieces of the animation\n self.rect.set_edgecolor('k')\n self.objects.set_data(points[:, 0], points[:, 1])\n self.objects.set_markersize(ms)\n return 
self.objects, self.rect\n\n    def set_data(self, func):\n        self.func = func\n\n    def get_data(self):\n        if len(self.points) > 1:\n            points, self.size = self.func(self.points[-1])\n        else:\n            points, self.size = self.func()\n        return points\n\n    def data(self, t=0):\n        return self.func\n\n    def plotter(self):\n\n        anim = animation.FuncAnimation(self.fig, self.animate, fargs=(self.data, self.objects), frames=500,\n                                       interval=self.interval, blit=False, init_func=self.init)\n        plt.show()", "repo_name": "jkapila/theNatureofCodeProject", "sub_path": "plot_wrap.py", "file_name": "plot_wrap.py", "file_ext": "py", "file_size_in_byte": 4797, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "73", "api": [{"api_name": "matplotlib.pyplot.figure", "line_number": 8, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 8, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.Rectangle", "line_number": 55, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 55, "usage_type": "name"}, {"api_name": "numpy.diff", "line_number": 125, "usage_type": "call"}, {"api_name": "matplotlib.animation.FuncAnimation", "line_number": 148, "usage_type": "call"}, {"api_name": "matplotlib.animation", "line_number": 148, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 150, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 150, "usage_type": "name"}]}
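A hypothetical driver for the Plotter class above: set_data() expects a callable returning (points, size) with points shaped (N, 2), and get_data() may call it with the previous batch of points, so the callable should accept an optional argument.

    import numpy as np
    from plot_wrap import Plotter  # assumes the module above is importable

    def random_points(prev=None):
        # 20 random points in the box, drawn with radius 0.05
        return np.random.uniform(-1.5, 1.5, size=(20, 2)), 0.05

    p = Plotter(2, 2, boundary='ABCD', interval=50)
    p.set_data(random_points)
    p.plotter()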
+{"seq_id": "35588793310", "text": "import torch\nimport torchvision.transforms as transforms\nimport torch.utils.data as data\nfrom datasets import SubDataset, AbstractDomainInterface, ExpandRGBChannels\nimport os\nimport os.path as osp\nimport csv\nimport subprocess\nimport pickle\nfrom PIL import Image\nimport numpy as np\n\nCLASSES = [\"breast_ER_patches\", \"breast_HE_patches\", \"kidney_HE_patches\", \"kidney_MAS_patches\"]\n\nclass ANHIRBase(data.Dataset):\n def __init__(self, source_dir, split, image_path=\"images_96.npy\", label_path=\"labels.pkl\", imsize=224, transforms=None,\n to_gray=False, download=False, extract=True):\n super(ANHIRBase,self).__init__()\n self.index_cache_path = source_dir\n self.source_dir = source_dir\n self.split = split\n self.imsize = imsize\n self.image_path = image_path\n self.to_gray = to_gray\n if transforms is None:\n self.transforms = transforms.Compose([transforms.Resize((imsize, imsize)),\n transforms.ToTensor()])\n else:\n self.transforms = transforms\n assert split in [\"train\", \"valid\", \"test\"]\n if extract:\n self.data = np.load(osp.join(source_dir, image_path))\n self.img_list = np.arange(len(self.data))\n with open(osp.join(source_dir, label_path), \"rb\") as fp:\n str_labels = pickle.load(fp)\n numeric_labels = []\n for l in str_labels:\n label = np.zeros(5, dtype=np.int64)\n label[CLASSES.index(l)] = 1\n numeric_labels.append(label)\n labels = np.stack(numeric_labels)\n self.labels = torch.LongTensor(labels)\n\n if not (osp.exists(osp.join(self.source_dir, 'valid_split.pt'))\n and osp.exists(osp.join(self.source_dir, 'train_split.pt'))\n and osp.exists(osp.join(self.source_dir, 'test_split.pt'))):\n self.generate_split()\n\n self.split_inds = torch.load(osp.join(self.index_cache_path, \"%s_split.pt\"% self.split))\n\n def __len__(self):\n return len(self.split_inds)\n\n def __getitem__(self, item):\n index = self.split_inds[item]\n img = self.data[index]\n img = Image.fromarray(img)\n if not self.to_gray:\n img = self.transforms(img.convert('RGB'))\n else:\n img = self.transforms(img.convert('L'))\n return img, self.labels[index]\n\n def generate_split(self):\n n_total = len(self.img_list)\n train_num = int(0.6*n_total)\n val_num = int(0.7*n_total)\n train_inds = np.arange(train_num)\n val_inds = np.arange(start=train_num, stop=val_num)\n test_inds = np.arange(start=val_num, stop=n_total)\n\n torch.save(train_inds, osp.join(self.index_cache_path, \"train_split.pt\"))\n torch.save(val_inds, osp.join(self.index_cache_path, \"valid_split.pt\"))\n torch.save(test_inds, osp.join(self.index_cache_path, \"test_split.pt\"))\n return\n\n\nclass ANHIR(AbstractDomainInterface):\n dataset_path = \"ANHIR\"\n def __init__(self, root_path=\"./workspace/datasets/ANHIR\", downsample=None, shrink_channels=False, test_length=None, download=False,\n extract=True, doubledownsample=None):\n \"\"\"\n :param leave_out_classes: if a sample has ANY class from this list as positive, then it is removed from indices.\n :param keep_in_classes: when specified, if a sample has None of the class from this list as positive, then it\n is removed from indices..\n \"\"\"\n self.name = \"ANHIR\"\n super(ANHIR, self).__init__()\n self.downsample = downsample\n self.shrink_channels=shrink_channels\n self.max_l = test_length\n cache_path = root_path\n source_path = root_path\n if doubledownsample is not None:\n transform_list = [transforms.Resize(doubledownsample),]\n else:\n transform_list = []\n if downsample is not None:\n print(\"downsampling to\", downsample)\n 
transform_list += [transforms.Resize((downsample, downsample)),\n transforms.ToTensor(),]\n if self.shrink_channels:\n transform_list += [transforms.Grayscale(),]\n #else:\n # transform_list += [transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),]\n transform = transforms.Compose(transform_list)\n self.image_size = (downsample, downsample)\n else:\n transform_list += [transforms.Resize((224, 224)),\n transforms.ToTensor(), ]\n if self.shrink_channels:\n transform_list += [transforms.Grayscale(),]\n #else:\n # transform_list += [transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),]\n transform = transforms.Compose(transform_list)\n self.image_size = (224, 224)\n\n self.ds_train = ANHIRBase(source_path, \"train\", imsize=self.image_size[0], transforms=transform,\n to_gray=shrink_channels, download=download, extract=extract)\n self.ds_valid = ANHIRBase(source_path, \"valid\", imsize=self.image_size[0], transforms=transform,\n to_gray=shrink_channels, download=download, extract=extract)\n self.ds_test = ANHIRBase(source_path, \"test\", imsize=self.image_size[0], transforms=transform,\n to_gray=shrink_channels, download=download, extract=extract)\n if extract:\n self.D1_train_ind = self.get_filtered_inds(self.ds_train, shuffle=True)\n self.D1_valid_ind = self.get_filtered_inds(self.ds_valid, shuffle=True, max_l=self.max_l)\n self.D1_test_ind = self.get_filtered_inds(self.ds_test, shuffle=True)\n\n self.D2_valid_ind = self.get_filtered_inds(self.ds_train, shuffle=True)\n self.D2_test_ind = self.get_filtered_inds(self.ds_test)\n\n\n def get_filtered_inds(self, basedata: ANHIRBase, shuffle=False, max_l=None):\n output_inds = torch.arange(0, len(basedata)).int()\n if shuffle:\n output_inds = output_inds[torch.randperm(len(output_inds))]\n if max_l is not None:\n if len(output_inds) >max_l:\n output_inds = output_inds[:max_l]\n return output_inds\n\n def get_D1_train(self):\n return SubDataset(self.name, self.ds_train, self.D1_train_ind)\n\n def get_D1_valid(self):\n return SubDataset(self.name, self.ds_valid, self.D1_valid_ind, label=0)\n\n def get_D1_test(self):\n return SubDataset(self.name, self.ds_test, self.D1_test_ind, label=0)\n\n def get_D2_valid(self, D1):\n assert self.is_compatible(D1)\n target_indices = self.D2_valid_ind\n return SubDataset(self.name, self.ds_train, target_indices, label=1, transform=D1.conformity_transform())\n\n def get_D2_test(self, D1):\n assert self.is_compatible(D1)\n target_indices = self.D2_test_ind\n return SubDataset(self.name, self.ds_test, target_indices, label=1, transform=D1.conformity_transform())\n\n def conformity_transform(self):\n target = self.image_size[0]\n if self.shrink_channels:\n return transforms.Compose([ExpandRGBChannels(),\n transforms.ToPILImage(),\n transforms.Grayscale(),\n transforms.Resize((target, target)),\n transforms.ToTensor()\n ])\n else:\n return transforms.Compose([\n ExpandRGBChannels(),\n transforms.ToPILImage(),\n transforms.Resize((target, target)),\n transforms.ToTensor(),\n ])\n\nif __name__ == \"__main__\":\n #data1 = ANHIR(\"workspace\\\\datasets\\\\ANHIR\")\n data1 = ANHIR()\n d1 = data1.get_D1_train()\n import matplotlib.pyplot as plt\n\n print(len(d1))\n for i in range(10):\n x, y = d1[i]\n x2 = x * 0.229 + 0.485\n plt.imshow(x2.numpy().transpose((1, 2, 0)))", "repo_name": "caotians1/OD-test-master", "sub_path": "datasets/ANHIR.py", "file_name": "ANHIR.py", "file_ext": "py", "file_size_in_byte": 8365, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 
4, "dataset": "github-code", "pt": "73", "api": [{"api_name": "torch.utils.data.Dataset", "line_number": 15, "usage_type": "attribute"}, {"api_name": "torch.utils.data", "line_number": 15, "usage_type": "name"}, {"api_name": "torchvision.transforms", "line_number": 25, "usage_type": "name"}, {"api_name": "torchvision.transforms.Compose", "line_number": 26, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 26, "usage_type": "name"}, {"api_name": "torchvision.transforms.Resize", "line_number": 26, "usage_type": "call"}, {"api_name": "torchvision.transforms.ToTensor", "line_number": 27, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 27, "usage_type": "name"}, {"api_name": "torchvision.transforms", "line_number": 29, "usage_type": "name"}, {"api_name": "numpy.load", "line_number": 32, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 32, "usage_type": "call"}, {"api_name": "os.path", "line_number": 32, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 33, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 34, "usage_type": "call"}, {"api_name": "os.path", "line_number": 34, "usage_type": "name"}, {"api_name": "pickle.load", "line_number": 35, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 38, "usage_type": "call"}, {"api_name": "numpy.int64", "line_number": 38, "usage_type": "attribute"}, {"api_name": "numpy.stack", "line_number": 41, "usage_type": "call"}, {"api_name": "torch.LongTensor", "line_number": 42, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 44, "usage_type": "call"}, {"api_name": "os.path", "line_number": 44, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 44, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 45, "usage_type": "call"}, {"api_name": "os.path", "line_number": 45, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 45, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 46, "usage_type": "call"}, {"api_name": "os.path", "line_number": 46, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 46, "usage_type": "call"}, {"api_name": "torch.load", "line_number": 49, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 49, "usage_type": "call"}, {"api_name": "os.path", "line_number": 49, "usage_type": "name"}, {"api_name": "PIL.Image.fromarray", "line_number": 57, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 57, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 68, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 69, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 70, "usage_type": "call"}, {"api_name": "torch.save", "line_number": 72, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 72, "usage_type": "call"}, {"api_name": "os.path", "line_number": 72, "usage_type": "name"}, {"api_name": "torch.save", "line_number": 73, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 73, "usage_type": "call"}, {"api_name": "os.path", "line_number": 73, "usage_type": "name"}, {"api_name": "torch.save", "line_number": 74, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 74, "usage_type": "call"}, {"api_name": "os.path", "line_number": 74, "usage_type": "name"}, {"api_name": "datasets.AbstractDomainInterface", "line_number": 78, "usage_type": "name"}, {"api_name": "torchvision.transforms.Resize", "line_number": 95, 
"usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 95, "usage_type": "name"}, {"api_name": "torchvision.transforms.Resize", "line_number": 100, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 100, "usage_type": "name"}, {"api_name": "torchvision.transforms.ToTensor", "line_number": 101, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 101, "usage_type": "name"}, {"api_name": "torchvision.transforms.Grayscale", "line_number": 103, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 103, "usage_type": "name"}, {"api_name": "torchvision.transforms.Compose", "line_number": 106, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 106, "usage_type": "name"}, {"api_name": "torchvision.transforms.Resize", "line_number": 109, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 109, "usage_type": "name"}, {"api_name": "torchvision.transforms.ToTensor", "line_number": 110, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 110, "usage_type": "name"}, {"api_name": "torchvision.transforms.Grayscale", "line_number": 112, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 112, "usage_type": "name"}, {"api_name": "torchvision.transforms.Compose", "line_number": 115, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 115, "usage_type": "name"}, {"api_name": "torch.arange", "line_number": 134, "usage_type": "call"}, {"api_name": "torch.randperm", "line_number": 136, "usage_type": "call"}, {"api_name": "datasets.SubDataset", "line_number": 143, "usage_type": "call"}, {"api_name": "datasets.SubDataset", "line_number": 146, "usage_type": "call"}, {"api_name": "datasets.SubDataset", "line_number": 149, "usage_type": "call"}, {"api_name": "datasets.SubDataset", "line_number": 154, "usage_type": "call"}, {"api_name": "datasets.SubDataset", "line_number": 159, "usage_type": "call"}, {"api_name": "torchvision.transforms.Compose", "line_number": 164, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 164, "usage_type": "name"}, {"api_name": "datasets.ExpandRGBChannels", "line_number": 164, "usage_type": "call"}, {"api_name": "torchvision.transforms.ToPILImage", "line_number": 165, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 165, "usage_type": "name"}, {"api_name": "torchvision.transforms.Grayscale", "line_number": 166, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 166, "usage_type": "name"}, {"api_name": "torchvision.transforms.Resize", "line_number": 167, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 167, "usage_type": "name"}, {"api_name": "torchvision.transforms.ToTensor", "line_number": 168, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 168, "usage_type": "name"}, {"api_name": "torchvision.transforms.Compose", "line_number": 171, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 171, "usage_type": "name"}, {"api_name": "datasets.ExpandRGBChannels", "line_number": 172, "usage_type": "call"}, {"api_name": "torchvision.transforms.ToPILImage", "line_number": 173, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 173, "usage_type": "name"}, {"api_name": "torchvision.transforms.Resize", "line_number": 174, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 
174, "usage_type": "name"}, {"api_name": "torchvision.transforms.ToTensor", "line_number": 175, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 175, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 188, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 188, "usage_type": "name"}]}
+{"seq_id": "19985601639", "text": "import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.interpolate import splev, splrep\nfrom scipy import interpolate\nfrom sklearn import neighbors, datasets\nimport pickle\nimport math\n\n# def fun(x_inp, y_inp):\n\n# df = pd.read_excel(\"boundaryPoints_2.xlsx\", header=None)\n# x_points = list(df.iloc[:, 0])\n# y_points = list(df.iloc[:, 1])\n# # print(x_points)\n# # print(y_points)\n# tck = splrep(x_points, y_points)\n\n# # df_h = pd.read_csv(\"BohDataN.csv\",header=None)\n# # df_b = pd.read_csv(\"BorDataN.csv\",header=None)\n# # df_c = pd.read_csv(\"CADataN.csv\",header=None)\n\n# # for i in range(0,324):\n# # df_h[i] = df_h[i].fillna(99999)\n# # df_b[i] = df_b[i].fillna(99999)\n# # df_c[i] = df_c[i].fillna(99999)\n\n# # x1 = []\n# # x2 = []\n# # y1 = []\n# # for i in range(0,150):\n# # for j in range(0,324):\n# # if(df_h.iloc[i,j]==99999):\n# # continue\n# # else:\n# # x2.append(df_h.iloc[i,j])\n# # x1.append(df_b.iloc[i,j])\n# # y1.append(df_c.iloc[i,j])\n\n# # x2 ia hstar\n# # x1 is b star\n# # x = np.array(x1)\n# # y = np.array(x2)\n# # xg, yg = np.meshgrid(x, y,indexing='ij', sparse=True)\n# # z = np.array(y1)\n# # g = interpolate.interp2d(x, y, z, kind='quintic')\n\n# g = pickle.load(open('predictContactAngle.pkl', 'rb'))\n# result = g(x_inp, y_inp)\n\n# x_input = x_inp\n# y_input = y_inp\n# y_out = interpolate.splev(x_input, tck)\n# if y_out-y_input < 0:\n# yo = 1\n# #print(\"Unstable region\")\n# # print(y_out,y_input)\n# elif (x_input < 0 and y_input > 0) or (x_input < 0 and y_input < 0) or (x_input > 0 and y_input < 0):\n# # print(\"Unstable region\")\n# # print(y_out,y_input)\n# yoyo = 2\n\n# else:\n# #print(\"stable region\")\n# #print(\"expected output\",y_out)\n# #print(\"given output\",y_input)\n# contactAngle = g(x_input, y_input)\n# ##warnings.filterwarnings(\"ignore\", category=RuntimeWarning)\n# # we need to output znew[0]\n# # print(round(contactAngle[0],0))\n# if result[0]>=95:\n# b_star_array = np.arange(0, 1, 0.005)\n# h_star_array = np.arange(0, 2, 0.005)\n# else:\n# b_star_array = np.arange(0, 4, 0.01)\n# h_star_array = np.arange(0, 4, 0.01)\n# # h_input = 1.5\n# c_angle_input = round(contactAngle[0], 0)\n# # print(b_star_array)\n# contour_b = []\n# contour_h = []\n\n# df = pd.read_csv(\"boundaryThinFilmData.csv\", header=None)\n# x_thinFilm = list(df.iloc[1:, 0])\n# y_thinFilm = list(df.iloc[1:, 1])\n# tck1 = interpolate.splrep(x_thinFilm, y_thinFilm)\n\n# for j in h_star_array:\n# for i in b_star_array:\n# y_out1 = interpolate.splev(i, tck1)\n# if j < y_out1:\n# continue\n# y_out = interpolate.splev(i, tck)\n# if y_out-j < 0:\n# continue\n# contactangle = round(g(i, j)[0], 1)\n# if contactangle == c_angle_input:\n# contour_b.append(i)\n# contour_h.append(j)\n\n# # print(contour_b)\n# # print(contour_h)\n# fin_contourA = []\n# fin_contourB = []\n# x = 0\n# y = 0\n# x_an = 0\n# y_an = 0\n# num = 0\n# ln = len(contour_b)\n# while num < ln:\n# mn = float('inf')\n# for i, j in zip(contour_b, contour_h):\n# if (i-x)*(i-x)+(j-y)*(j-y) < mn:\n# mn = (i-x)*(i-x)+(j-y)*(j-y)\n# x_an = i\n# y_an = j\n# contour_b.remove(x_an)\n# contour_h.remove(y_an)\n# fin_contourA.append(x_an)\n# fin_contourB.append(y_an)\n# x = x_an\n# y = y_an\n# num += 1\n\n# # print(fin_contourA)\n# # print(fin_contourB)\n# # plt.plot(fin_contourA,fin_contourB)\n# # plt.show()\n# fin_contourA = [0] + fin_contourA\n# fin_contourB = [0] + fin_contourB\n# return fin_contourA, fin_contourB\n\n\n# fun(0.25, 1)\n# 
pickle.dump(tck1,open('contourPredict.pkl','wb'))\n# loaded_model=pickle.load(open('contourPredict.pkl','rb'))\n# inp=[x_input,y_input]\n# print(loaded_model.predict([inp]))\n\n# print(contour_b)\n# print(contour_h)\n\n\ndef fun(x_inp, y_inp):\n    df_h = pd.read_csv(\"BohDataN.csv\", header=None)\n    df_b = pd.read_csv(\"BorDataN.csv\", header=None)\n    df_s = pd.read_csv(\"CADataN.csv\", header=None)\n\n    with open('predictContactAngle.pkl', 'rb') as fp:\n        g = pickle.load(fp)\n    c_angle = round(g(x_inp, y_inp)[0], 0)\n\n    y = df_h.to_numpy()\n    x = df_b.to_numpy()\n    z = df_s.to_numpy()\n\n\n    fig, ax = plt.subplots(1, 1)\n    # plots contour lines\n    cs = ax.contour(x, y, z, [c_angle])\n\n    ax.set_title('Contour Plot')\n    ax.set_xlabel('b')\n    ax.set_ylabel('h')\n    plt.xlim([0, 3])\n    plt.ylim([0, 3])\n\n\n    x_coord = []\n    y_coord = []\n    for item in cs.collections:\n        for i in item.get_paths():\n            v = i.vertices\n            x = np.array(v[:, 0])\n            y = np.array(v[:, 1])\n            for i in range(len(x)):\n                # keep only finite contour vertices\n                if not math.isnan(x[i]) and not math.isnan(y[i]):\n                    x_coord.append(x[i])\n                    y_coord.append(y[i])\n                else:\n                    print(x[i], y[i])\n    # plt.plot(x_coord,y_coord)\n    # plt.show()\n    return x_coord, y_coord", "repo_name": "Kush223/Engg_Practicum", "sub_path": "contourPredict.py", "file_name": "contourPredict.py", "file_ext": "py", "file_size_in_byte": 5919, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "73", "api": [{"api_name": "pandas.read_csv", "line_number": 146, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 147, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 148, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 150, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 158, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 158, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 165, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 165, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 166, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 166, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 176, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 179, "usage_type": "call"}, {"api_name": "math.isnan", "line_number": 184, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 202, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 202, "usage_type": "name"}]}
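A hypothetical call into the active fun() above: it returns the (b, h) coordinates of the iso-contact-angle contour through the given input point, which the caller can plot directly.

    import matplotlib.pyplot as plt
    from contourPredict import fun  # assumes the module above is importable

    b_coords, h_coords = fun(0.25, 1)
    plt.plot(b_coords, h_coords)
    plt.xlabel('b')
    plt.ylabel('h')
    plt.show()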
+{"seq_id": "71069450519", "text": "import pandas as pd\nimport numpy as np\nimport plotly.express as px\nimport streamlit as st\nimport plotly.graph_objects as go\nimport plotly.figure_factory as ff\n\n# Configurando a página\nst.set_page_config(\n page_title=\"Dashboard MUNIC\",\n page_icon=\"dashboard_adidas/logoadidas.png\",\n layout=\"wide\",\n)\n\npaleta = [\"#63A3B2\", \"#EF7158\"]\namarelo = [\"#FBD178\"]\nazul = [\"#63A3B2\"]\nlaranja = ['#EF7158']\n\n# Função para calcular frequência relativa\ndef freq_rel(coluna):\n contagem = np.sum(coluna)\n calc = round((coluna / contagem) * 100, 2)\n return calc\n\n# Importando dados formatados\ndf = pd.ExcelFile(\"Base_MUNIC_2021 - Atualizada.xlsx\")\n\n# Nome das tabelas atuais\ndf_planilhas = df.sheet_names\n# Importando cada planilha separadamente\ndf_pre = pd.read_excel(df, sheet_name=df_planilhas[0]) # Informações atuais do prefeito\ndf_edu = pd.read_excel(df, sheet_name=df_planilhas[1]) # Educação\ndf_cul = pd.read_excel(df, sheet_name=df_planilhas[2]) # Cultura\ndf_esp = pd.read_excel(df, sheet_name=df_planilhas[3]) # Esporte\ndf_sau = pd.read_excel(df, sheet_name=df_planilhas[4]) # Saúde\n\n# ----------------------------------------------------------------------\n\n# Alterando nome das colunas\ndicionario_pre = df_pre.rename(\n columns={\n \"Mun\": \"Municipio\",\n \"Mpeg02\": \"Mandato 2020\",\n \"Mpeg03\": \"Sexo\",\n \"Mpeg04\": \"Idade\",\n \"Mpeg05\": \"Raça\",\n \"Mpeg051\": \"Autodeclara\",\n \"Mpeg06\": \"Escolaridade\",\n \"Pop estimada 2021\": \"Pop\",\n },\n inplace=True,\n)\n# Alterando valores da variável 'Regiao'\nmapeamento_pre = {\n \"1 - Norte\": \"Norte\",\n \"2 - Nordeste\": \"Nordeste\",\n \"3 - Sudeste\": \"Sudeste\",\n \"4 - Sul\": \"Sul\",\n \"5 - Centro-Oeste\": \"Centro-Oeste\",\n}\ndf_pre[\"Regiao\"] = df_pre[\"Regiao\"].replace(mapeamento_pre)\n\n\n\n# Alterando nome das colunas\ndicionario_edu = df_edu.rename (columns={ 'Mun': 'Municipio',\n 'Medu01': 'Orgao Gestor',\n 'Medu03': 'Sexo',\n 'Medu04': 'Idade',\n 'Medu05': 'Raça',\n 'Medu051': 'Autodeclara',\n 'Medu06': 'Escolaridade'\n}, inplace=True)\n\n# Alterando valores da coluna 'Regiao'\nmapeamento_edu = {\n '1 - Norte': 'Norte',\n '2 - Nordeste': 'Nordeste',\n '3 - Sudeste': 'Sudeste',\n '4 - Sul': 'Sul',\n '5 - Centro-Oeste': 'Centro-Oeste',\n}\ndf_edu['Regiao'] = df_edu['Regiao'].replace(mapeamento_edu)\n\n\n\n# Alterando nome das colunas\ndicionario_cul = df_cul.rename (columns={ 'Mun': 'Municipio',\n 'Mcul01': 'Orgao Gestor',\n 'Mcul03': 'Sexo',\n 'Mcul04': 'Idade',\n 'Mcul05': 'Raça',\n 'Mcul051': 'Autodeclara',\n 'Mcul06': 'Escolaridade'\n}, inplace=True)\n\n# Alterando valores da coluna 'Regiao'\nmapeamento_cul = {\n '1 - Norte': 'Norte',\n '2 - Nordeste': 'Nordeste',\n '3 - Sudeste': 'Sudeste',\n '4 - Sul': 'Sul',\n '5 - Centro-Oeste': 'Centro-Oeste',\n}\ndf_cul['Regiao'] = df_cul['Regiao'].replace(mapeamento_cul)\n\n\n# Alterando nome das colunas\ndicionario_esp = df_esp.rename (columns={ 'Mun': 'Municipio',\n 'Mesp01': 'Orgao Gestor',\n 'Mesp03': 'Sexo',\n 'Mesp04': 'Idade',\n 'Mesp05': 'Raça',\n 'Mesp051': 'Autodeclara',\n 'Mesp06': 'Escolaridade'\n}, inplace=True)\n\n# Alterando valores da coluna 'Regiao'\nmapeamento_esp = {\n '1 - Norte': 'Norte',\n '2 - Nordeste': 'Nordeste',\n '3 - Sudeste': 'Sudeste',\n '4 - Sul': 'Sul',\n '5 - Centro-Oeste': 'Centro-Oeste',\n}\ndf_esp['Regiao'] = df_esp['Regiao'].replace(mapeamento_esp)\n\n# Alterando nome das colunas\ndicionario_sau = df_sau.rename (columns={ 'Mun': 'Municipio',\n 'Msau01': 'Orgao 
Gestor',\n 'Msau03': 'Sexo',\n 'Msau04': 'Idade',\n 'Msau05': 'Raça',\n 'Msau051': 'Autodeclara',\n 'Msau06': 'Escolaridade'\n}, inplace=True)\n\n# Alterando valores da coluna 'Regiao'\nmapeamento_sau = {\n '1 - Norte': 'Norte',\n '2 - Nordeste': 'Nordeste',\n '3 - Sudeste': 'Sudeste',\n '4 - Sul': 'Sul',\n '5 - Centro-Oeste': 'Centro-Oeste',\n}\ndf_sau['Regiao'] = df_sau['Regiao'].replace(mapeamento_sau)\n\n\n# ----------------------------------------------------------------------\n\n\nst.title('Perfil de mulheres em cargos públicos municipais')\n\n# Usando guias para navegar entre as páginas\ntabs = st.tabs([\"Geral\", \"Prefeituras\", \"Educação\", \"Cultura\", \"Esporte\", \"Saúde\"])\n\n# ----------------------------------------------------------------------\n\nwith tabs[0]:\n st.markdown(\"\"\"
\"\"\", unsafe_allow_html=True)\n\n colge1,colge2 = st.columns([1, 2])\n\n with colge1:\n\n # Separando os sexos\n pre_fem = pd.DataFrame(df_pre[df_pre['Sexo'] == 'Feminino'])\n pre_mas = pd.DataFrame(df_pre[df_pre['Sexo'] == 'Masculino'])\n # Contando quantos prefeitos por sexo\n contagem_pre_fem = len(pre_fem)\n contagem_pre_mas = len(pre_mas)\n\n # Separando os sexos\n edu_fem = pd.DataFrame(df_edu[df_edu['Sexo'] == 'Feminino'])\n edu_mas = pd.DataFrame(df_edu[df_edu['Sexo'] == 'Masculino'])\n # Contagem\n contagem_edu_fem = len(edu_fem)\n contagem_edu_mas = len(edu_mas)\n\n # Separando os sexos\n cul_fem = pd.DataFrame(df_cul[df_cul['Sexo'] == 'Feminino'])\n cul_mas = pd.DataFrame(df_cul[df_cul['Sexo'] == 'Masculino'])\n # Contagem\n contagem_cul_fem = len(cul_fem)\n contagem_cul_mas = len(cul_mas)\n\n # Separando os sexos\n esp_fem = pd.DataFrame(df_esp[df_esp['Sexo'] == 'Feminino'])\n esp_mas = pd.DataFrame(df_esp[df_esp['Sexo'] == 'Masculino'])\n # Contagem\n contagem_esp_fem = len(esp_fem)\n contagem_esp_mas = len(esp_mas)\n\n # Separando os sexos\n sau_fem = pd.DataFrame(df_sau[df_sau['Sexo'] == 'Feminino'])\n sau_mas = pd.DataFrame(df_sau[df_sau['Sexo'] == 'Masculino'])\n # Contagem\n contagem_sau_fem = len(sau_fem)\n contagem_sau_mas = len(sau_mas)\n \n # Feminino\n geral_fem = (contagem_pre_fem, contagem_edu_fem, contagem_cul_fem, contagem_esp_fem, contagem_sau_fem)\n contagem_geral_fem = np.sum(geral_fem)\n\n # Masculino\n geral_mas = (contagem_pre_mas, contagem_edu_mas, contagem_cul_mas, contagem_esp_mas, contagem_sau_mas)\n contagem_geral_mas = np.sum(geral_mas)\n\n # Dados\n labels = [\"Feminino\", \"Masculino\"]\n valores = [contagem_geral_fem, contagem_geral_mas]\n\n # Criar a figura de pizza\n st.write(\"Distribuição de Gênero nos Orgãos Públicos Municipais\")\n fig_pizza = px.pie(\n names=labels,\n values=valores,\n # title=\"Distribuição de Gênero Geral\",\n width=400,\n hole=0.5\n )\n\n # Exibir a figura no Streamlit\n st.plotly_chart(fig_pizza, use_container_width=False)\n\n with colge2:\n pre_fem = pd.DataFrame(df_pre[df_pre[\"Sexo\"] == \"Feminino\"])\n edu_fem = pd.DataFrame(df_edu[df_edu[\"Sexo\"] == \"Feminino\"])\n cul_fem = pd.DataFrame(df_cul[df_cul[\"Sexo\"] == \"Feminino\"])\n esp_fem = pd.DataFrame(df_esp[df_esp[\"Sexo\"] == \"Feminino\"])\n sau_fem = pd.DataFrame(df_sau[df_sau[\"Sexo\"] == \"Feminino\"])\n geral_fem = pd.concat([pre_fem, edu_fem, cul_fem, esp_fem, sau_fem])\n geral_fem.drop(columns=['Faixa_pop'], inplace=True)\n\n dados = geral_fem['Pop']\n\n intervalos = [\n (0, 5000),\n (5001, 10000),\n (10001, 20000),\n (20001, 50000),\n (50001, 100000),\n (100001, 500000),\n (500001, max(dados))\n ]\n\n def atribuir_faixa_populacional(valor):\n for i, (min_intervalo, max_intervalo) in enumerate(intervalos):\n if min_intervalo <= valor <= max_intervalo:\n return f\"{min_intervalo} - {max_intervalo}\"\n\n # Calcula a contagem da faixa populacional\n geral_fem['Faixa Populacional'] = geral_fem['Pop'].apply(atribuir_faixa_populacional)\n contagem_faixa_populacional = geral_fem['Faixa Populacional'].value_counts().reset_index()\n contagem_faixa_populacional.columns = ['Faixa Populacional', 'Contagem']\n\n # Calcula a frequência relativa\n contagem_faixa_populacional['Frequencia_Relativa'] = contagem_faixa_populacional['Contagem'] / contagem_faixa_populacional['Contagem'].sum()\n\n # Cria um gráfico de barras empilhadas com go.Figure()\n st.write('Faixa Populacional nas Cidades com Liderança Feminina')\n fig = go.Figure()\n\n # Adiciona barras empilhadas ao 
gráfico\n    for i, row in contagem_faixa_populacional.iterrows():\n        faixa = row['Faixa Populacional']\n        freq_relativa = row['Frequencia_Relativa']\n        contagem = row['Contagem']\n        hovertext = f'Faixa: {faixa}<br>Contagem: {contagem}<br>Frequência Relativa: {freq_relativa:.2%}'\n        fig.add_trace(go.Bar(\n            x=[faixa],\n            y=[freq_relativa], # Use a frequência relativa como altura da barra\n            text=[contagem],\n            textposition='outside',\n            name=faixa,\n            hoverinfo='text',\n            hovertext=hovertext,\n            marker_color=laranja[i % len(laranja)],\n            width=1, # Define a largura da barra como 1 (1 unidade da frequência relativa)\n        ))\n\n    # Atualiza layout do gráfico\n    fig.update_layout(\n        xaxis_title='Faixa Populacional',\n        yaxis_title='Frequência Relativa',\n        # title='Faixa Populacional nas Cidades com Liderança Feminina',\n        width=930,\n        height=450,\n        barmode='stack', # Empilha as barras\n    )\n\n    # Exibe o gráfico no Streamlit\n    st.plotly_chart(fig, use_container_width=False)\n\n    # Substituir \"Nao informou\" por NaN (evita inplace encadeado em coluna)\n    geral_fem['Idade'] = geral_fem['Idade'].replace('Nao informou', pd.NA)\n\n    # Excluir as linhas onde a coluna \"Idade\" é NaN\n    geral_fem.dropna(subset=['Idade'], inplace=True)\n\n    # Transformar as variáveis de object para int\n    geral_fem['Idade'] = geral_fem['Idade'].astype(int)\n\n    # Definir as faixas etárias (limite superior exclusivo, para não contar 30 e 60 duas vezes)\n    faixas_etarias = [(0, 30), (30, 60), (60, 120)]\n    faixas_etarias_labels = ['19-29', '30-59', '60+']\n\n    # Inicialize o Streamlit com três colunas\n    col1, col2, col3 = st.columns(3)\n\n    for i, faixa in enumerate(faixas_etarias):\n        min_age, max_age = faixa\n        faixa_label = faixas_etarias_labels[i]\n\n        # Filtrar dados para a faixa etária\n        filtered_data = geral_fem[(geral_fem['Idade'] >= min_age) & (geral_fem['Idade'] < max_age)]\n\n        # Calcular a frequência absoluta\n        freq_absoluta = len(filtered_data)\n\n        # Calcular a frequência relativa\n        freq_relativa = freq_absoluta / len(geral_fem)\n\n        # Criar um gráfico de distplot para a faixa etária atual\n        fig = ff.create_distplot([filtered_data['Idade']], [faixa_label], bin_size=5, show_curve=True)\n\n        # Configurar layout do gráfico\n        fig.update_layout(\n            xaxis_title='Idade',\n            yaxis_title='Densidade',\n            width=800,\n            height=500,\n        )\n\n        # Exibir o título acima do gráfico\n        with col1 if i == 0 else col2 if i == 1 else col3:\n            st.write(f'Distribuição de Idades - {faixa_label}')\n            st.plotly_chart(fig, use_container_width=True)\n\n# ----------------------------------------------------------------------\n\nwith tabs[1]:\n    colpre1, colpre2 = st.columns([1,2])\n\n    with colpre1:\n        st.markdown(\"\"\"
\"\"\", unsafe_allow_html=True)\n\n pre_fem = pd.DataFrame(df_pre[df_pre[\"Sexo\"] == \"Feminino\"])\n\n contagem_pre_fem = len(pre_fem)\n\n valores_pre = [contagem_pre_fem, len(df_pre) - contagem_pre_fem]\n rotulos_pre = [\"Feminino\", \"Masculino\"]\n\n st.write(\"Distribuição de Gênero nas Prefeituras\")\n card1, card2 = st.columns([1, 2])\n\n card1.metric(rotulos_pre[0], valores_pre[0])\n card2.metric(rotulos_pre[1], valores_pre[1])\n\n figpie1 = px.pie(\n values=valores_pre,\n names=rotulos_pre,\n color_discrete_sequence=paleta,\n width=400,\n hole=0.5\n )\n st.plotly_chart(figpie1, use_container_width=False)\n\n escolaridade_pre_fem = pre_fem.groupby('Escolaridade')['Escolaridade'].count().reset_index(name='Frequência acumulada')\n escolaridade_pre_fem['Percentagem'] = freq_rel(escolaridade_pre_fem['Frequência acumulada'])\n escolaridade_pre_fem = escolaridade_pre_fem.reset_index().drop('index', axis=1).sort_values(by='Percentagem', ascending=False)\n escolaridade_pre_fem['Percentagem'] = escolaridade_pre_fem['Percentagem'].apply(lambda x: f'{x:.2f}%')\n escolaridade_pre_fem = escolaridade_pre_fem.reset_index()\n\n st.write('Frequência do Nível Escolar das Prefeitas')\n st.markdown(\"\"\"
\"\"\", unsafe_allow_html=True)\n st.dataframe(escolaridade_pre_fem[['Escolaridade', 'Percentagem']])\n with colpre2: \n colpre = st.columns(2)\n # Agrupando por estados\n uf_pre = pre_fem.groupby('UF').size().reset_index(name='Frequência acumulada')\n uf_pre['Frequência relativa'] = freq_rel(uf_pre['Frequência acumulada'])\n uf_pre = uf_pre.rename(columns={'UF': 'Estados'})\n uf_pre = uf_pre.sort_values(by='Frequência acumulada', ascending=False)\n uf_pre = uf_pre.reset_index()\n uf_pre = uf_pre.drop('index', axis=1)\n\n uf_pre_grafico = uf_pre[['Estados', 'Frequência acumulada']]\n uf_pre_rotulo = uf_pre['Frequência relativa'].apply(lambda x: f'{x:.2f}%')\n\n\n # Agrupando por região\n rg_pre = pre_fem.groupby('Regiao')['Regiao'].count().reset_index(name='Contagem')\n rg_pre['Percentagem'] = freq_rel(rg_pre['Contagem']).apply(lambda x: f'{x:.2f}%')\n rg_pre = rg_pre.reset_index().drop('index', axis=1).sort_values(by='Contagem', ascending=False)\n\n st.write('Frequência de Mulheres Prefeitas por Região', size=16)\n rg1, rg2, rg3, rg4, rg5 = st.columns(5)\n\n rg1.metric(rg_pre['Regiao'][0], rg_pre['Percentagem'][0])\n rg2.metric(rg_pre['Regiao'][1], rg_pre['Percentagem'][1])\n rg3.metric(rg_pre['Regiao'][2], rg_pre['Percentagem'][2])\n rg4.metric(rg_pre['Regiao'][3], rg_pre['Percentagem'][3])\n rg5.metric(rg_pre['Regiao'][4], rg_pre['Percentagem'][4])\n st.markdown(\"\"\"
\"\"\", unsafe_allow_html=True)\n\n st.write('Frequência de Mulheres Prefeitas por Estado')\n figbar1 = px.bar(uf_pre,\n x='Estados',\n y='Frequência acumulada',\n #labels={'Frequência acumulada': 'Frequência Acumulada'},\n color_discrete_sequence=amarelo,\n width=900,\n height=390,\n )\n # Adicionando rótulos às barras\n figbar1.update_traces(\n text=uf_pre_rotulo, \n textposition='outside', \n )\n\n # Exibir o gráfico no Streamlit\n st.plotly_chart(figbar1, use_container_width=False)\n\n graf1, graf2 = st.columns(2)\n \n # Dados de contagem de raça/paleta\n contagem_raca_paleta_pre = pre_fem['Raça'].value_counts().reset_index()\n contagem_raca_paleta_pre.columns = ['Raça/paleta', 'Contagem']\n\n # Dados de autodeclarações \"Sim\" por raça/paleta\n autodeclara_sim_pre = pre_fem[pre_fem['Autodeclara'] == 'Sim']\n contagem_autodeclara_sim_pre = autodeclara_sim_pre['Raça'].value_counts().reset_index()\n contagem_autodeclara_sim_pre.columns = ['Raça/paleta', 'Contagem']\n\n graf1.write('Classificação de Raça/Cor das Prefeitas')\n # Criar o gráfico de barras lado a lado\n figbar2 = px.bar(contagem_raca_paleta_pre, x='Raça/paleta', y='Contagem',\n labels={'Raça/paleta': 'Raça/Cor', 'value': 'Contagem'},\n width=450,\n height=450,\n color_discrete_sequence=laranja\n )\n rotulo1 = contagem_raca_paleta_pre['Contagem']\n figbar2.update_traces(text=contagem_raca_paleta_pre['Contagem'], textposition='outside', showlegend=False)\n \n \n # Exibir o gráfico no Streamlit\n graf1.plotly_chart(figbar2, use_container_width=False)\n\n graf2.write('Prefeitas que falaram \"Sim\" para a classificação Étnico Racial')\n # Criar o gráfico de barras lado a lado\n figbar3 = px.bar(contagem_autodeclara_sim_pre, x='Raça/paleta', y='Contagem',\n labels={'Raça/paleta': 'Raça/Cor', 'value': 'Contagem'},\n width=450,\n height=450,\n color_discrete_sequence=azul\n )\n figbar3.update_traces(text=contagem_autodeclara_sim_pre['Contagem'], textposition='outside', showlegend=False)\n\n \n # Exibir o gráfico no Streamlit\n graf2.plotly_chart(figbar3, use_container_width=False)\n\n# ----------------------------------------------------------------------\n\nwith tabs[2]:\n coledu1, coledu2 = st.columns([1,2])\n\n with coledu1:\n st.markdown(\"\"\"
\"\"\", unsafe_allow_html=True)\n\n edu_fem = pd.DataFrame(df_edu[df_edu[\"Sexo\"] == \"Feminino\"])\n\n contagem_edu_fem = len(edu_fem)\n\n valores_edu = [contagem_edu_fem, len(df_edu) - contagem_edu_fem]\n rotulos_edu = [\"Feminino\", \"Masculino\"]\n\n st.write(\"Distribuição de Gênero nas edufeituras\")\n card1, card2 = st.columns([1, 2])\n\n card1.metric(rotulos_edu[0], valores_edu[0])\n card2.metric(rotulos_edu[1], valores_edu[1])\n\n figpie1 = px.pie(\n values=valores_edu,\n names=rotulos_edu,\n color_discrete_sequence=paleta,\n width=400,\n hole=0.5\n )\n st.plotly_chart(figpie1, use_container_width=False)\n\n escolaridade_edu_fem = edu_fem.groupby('Escolaridade')['Escolaridade'].count().reset_index(name='Frequência acumulada')\n escolaridade_edu_fem['Percentagem'] = freq_rel(escolaridade_edu_fem['Frequência acumulada'])\n escolaridade_edu_fem = escolaridade_edu_fem.reset_index().drop('index', axis=1).sort_values(by='Percentagem', ascending=False)\n escolaridade_edu_fem['Percentagem'] = escolaridade_edu_fem['Percentagem'].apply(lambda x: f'{x:.2f}%')\n escolaridade_edu_fem = escolaridade_edu_fem.reset_index()\n\n st.write('Frequência do Nível Escolar das edufeitas')\n st.markdown(\"\"\"
\"\"\", unsafe_allow_html=True)\n st.dataframe(escolaridade_edu_fem[['Escolaridade', 'Percentagem']])\n with coledu2: \n coledu = st.columns(2)\n # Agrupando por estados\n uf_edu = edu_fem.groupby('UF').size().reset_index(name='Frequência acumulada')\n uf_edu['Frequência relativa'] = freq_rel(uf_edu['Frequência acumulada'])\n uf_edu = uf_edu.rename(columns={'UF': 'Estados'})\n uf_edu = uf_edu.sort_values(by='Frequência acumulada', ascending=False)\n uf_edu = uf_edu.reset_index()\n uf_edu = uf_edu.drop('index', axis=1)\n\n uf_edu_grafico = uf_edu[['Estados', 'Frequência acumulada']]\n uf_edu_rotulo = uf_edu['Frequência relativa'].apply(lambda x: f'{x:.2f}%')\n\n\n # Agrupando por região\n rg_edu = edu_fem.groupby('Regiao')['Regiao'].count().reset_index(name='Contagem')\n rg_edu['Percentagem'] = freq_rel(rg_edu['Contagem']).apply(lambda x: f'{x:.2f}%')\n rg_edu = rg_edu.reset_index().drop('index', axis=1).sort_values(by='Contagem', ascending=False)\n\n st.write('Frequência de Mulheres edufeitas por Região', size=16)\n rg1, rg2, rg3, rg4, rg5 = st.columns(5)\n\n rg1.metric(rg_edu['Regiao'][0], rg_edu['Percentagem'][0])\n rg2.metric(rg_edu['Regiao'][1], rg_edu['Percentagem'][1])\n rg3.metric(rg_edu['Regiao'][2], rg_edu['Percentagem'][2])\n rg4.metric(rg_edu['Regiao'][3], rg_edu['Percentagem'][3])\n rg5.metric(rg_edu['Regiao'][4], rg_edu['Percentagem'][4])\n st.markdown(\"\"\"
\"\"\", unsafe_allow_html=True)\n\n st.write('Frequência de Mulheres edufeitas por Estado')\n figbar1 = px.bar(uf_edu,\n x='Estados',\n y='Frequência acumulada',\n #labels={'Frequência acumulada': 'Frequência Acumulada'},\n color_discrete_sequence=amarelo,\n width=900,\n height=390,\n )\n # Adicionando rótulos às barras\n figbar1.update_traces(\n text=uf_edu_rotulo, \n textposition='outside', \n )\n\n # Exibir o gráfico no Streamlit\n st.plotly_chart(figbar1, use_container_width=False)\n\n graf1, graf2 = st.columns(2)\n \n # Dados de contagem de raça/paleta\n contagem_raca_paleta_edu = edu_fem['Raça'].value_counts().reset_index()\n contagem_raca_paleta_edu.columns = ['Raça/paleta', 'Contagem']\n\n # Dados de autodeclarações \"Sim\" por raça/paleta\n autodeclara_sim_edu = edu_fem[edu_fem['Autodeclara'] == 'Sim']\n contagem_autodeclara_sim_edu = autodeclara_sim_edu['Raça'].value_counts().reset_index()\n contagem_autodeclara_sim_edu.columns = ['Raça/paleta', 'Contagem']\n\n graf1.write('Classificação de Raça/Cor das edufeitas')\n # Criar o gráfico de barras lado a lado\n figbar2 = px.bar(contagem_raca_paleta_edu, x='Raça/paleta', y='Contagem',\n labels={'Raça/paleta': 'Raça/Cor', 'value': 'Contagem'},\n width=450,\n height=450,\n color_discrete_sequence=laranja\n )\n rotulo1 = contagem_raca_paleta_edu['Contagem']\n figbar2.update_traces(text=contagem_raca_paleta_edu['Contagem'], textposition='outside', showlegend=False)\n \n \n # Exibir o gráfico no Streamlit\n graf1.plotly_chart(figbar2, use_container_width=False)\n\n graf2.write('edufeitas que falaram \"Sim\" para a classificação Étnico Racial')\n # Criar o gráfico de barras lado a lado\n figbar3 = px.bar(contagem_autodeclara_sim_edu, x='Raça/paleta', y='Contagem',\n labels={'Raça/paleta': 'Raça/Cor', 'value': 'Contagem'},\n width=450,\n height=450,\n color_discrete_sequence=azul\n )\n figbar3.update_traces(text=contagem_autodeclara_sim_edu['Contagem'], textposition='outside', showlegend=False)\n\n \n # Exibir o gráfico no Streamlit\n graf2.plotly_chart(figbar3, use_container_width=False)\n\n# ----------------------------------------------------------------------\n\nwith tabs[3]:\n colcul1, colcul2 = st.columns([1,2])\n\n with colcul1:\n st.markdown(\"\"\"
\"\"\", unsafe_allow_html=True)\n\n cul_fem = pd.DataFrame(df_cul[df_cul[\"Sexo\"] == \"Feminino\"])\n\n contagem_cul_fem = len(cul_fem)\n\n valores_cul = [contagem_cul_fem, len(df_cul) - contagem_cul_fem]\n rotulos_cul = [\"Feminino\", \"Masculino\"]\n\n st.write(\"Distribuição de Gênero nas culfeituras\")\n card1, card2 = st.columns([1, 2])\n\n card1.metric(rotulos_cul[0], valores_cul[0])\n card2.metric(rotulos_cul[1], valores_cul[1])\n\n figpie1 = px.pie(\n values=valores_cul,\n names=rotulos_cul,\n color_discrete_sequence=paleta,\n width=400,\n hole=0.5\n )\n st.plotly_chart(figpie1, use_container_width=False)\n\n escolaridade_cul_fem = cul_fem.groupby('Escolaridade')['Escolaridade'].count().reset_index(name='Frequência acumulada')\n escolaridade_cul_fem['Percentagem'] = freq_rel(escolaridade_cul_fem['Frequência acumulada'])\n escolaridade_cul_fem = escolaridade_cul_fem.reset_index().drop('index', axis=1).sort_values(by='Percentagem', ascending=False)\n escolaridade_cul_fem['Percentagem'] = escolaridade_cul_fem['Percentagem'].apply(lambda x: f'{x:.2f}%')\n escolaridade_cul_fem = escolaridade_cul_fem.reset_index()\n\n st.write('Frequência do Nível Escolar das culfeitas')\n st.markdown(\"\"\"
\"\"\", unsafe_allow_html=True)\n st.dataframe(escolaridade_cul_fem[['Escolaridade', 'Percentagem']])\n with colcul2: \n colcul = st.columns(2)\n # Agrupando por estados\n uf_cul = cul_fem.groupby('UF').size().reset_index(name='Frequência acumulada')\n uf_cul['Frequência relativa'] = freq_rel(uf_cul['Frequência acumulada'])\n uf_cul = uf_cul.rename(columns={'UF': 'Estados'})\n uf_cul = uf_cul.sort_values(by='Frequência acumulada', ascending=False)\n uf_cul = uf_cul.reset_index()\n uf_cul = uf_cul.drop('index', axis=1)\n\n uf_cul_grafico = uf_cul[['Estados', 'Frequência acumulada']]\n uf_cul_rotulo = uf_cul['Frequência relativa'].apply(lambda x: f'{x:.2f}%')\n\n\n # Agrupando por região\n rg_cul = cul_fem.groupby('Regiao')['Regiao'].count().reset_index(name='Contagem')\n rg_cul['Percentagem'] = freq_rel(rg_cul['Contagem']).apply(lambda x: f'{x:.2f}%')\n rg_cul = rg_cul.reset_index().drop('index', axis=1).sort_values(by='Contagem', ascending=False)\n\n st.write('Frequência de Mulheres culfeitas por Região', size=16)\n rg1, rg2, rg3, rg4, rg5 = st.columns(5)\n\n rg1.metric(rg_cul['Regiao'][0], rg_cul['Percentagem'][0])\n rg2.metric(rg_cul['Regiao'][1], rg_cul['Percentagem'][1])\n rg3.metric(rg_cul['Regiao'][2], rg_cul['Percentagem'][2])\n rg4.metric(rg_cul['Regiao'][3], rg_cul['Percentagem'][3])\n rg5.metric(rg_cul['Regiao'][4], rg_cul['Percentagem'][4])\n st.markdown(\"\"\"
\"\"\", unsafe_allow_html=True)\n\n st.write('Frequência de Mulheres culfeitas por Estado')\n figbar1 = px.bar(uf_cul,\n x='Estados',\n y='Frequência acumulada',\n #labels={'Frequência acumulada': 'Frequência Acumulada'},\n color_discrete_sequence=amarelo,\n width=900,\n height=390,\n )\n # Adicionando rótulos às barras\n figbar1.update_traces(\n text=uf_cul_rotulo, \n textposition='outside', \n )\n\n # Exibir o gráfico no Streamlit\n st.plotly_chart(figbar1, use_container_width=False)\n\n graf1, graf2 = st.columns(2)\n \n # Dados de contagem de raça/paleta\n contagem_raca_paleta_cul = cul_fem['Raça'].value_counts().reset_index()\n contagem_raca_paleta_cul.columns = ['Raça/paleta', 'Contagem']\n\n # Dados de autodeclarações \"Sim\" por raça/paleta\n autodeclara_sim_cul = cul_fem[cul_fem['Autodeclara'] == 'Sim']\n contagem_autodeclara_sim_cul = autodeclara_sim_cul['Raça'].value_counts().reset_index()\n contagem_autodeclara_sim_cul.columns = ['Raça/paleta', 'Contagem']\n\n graf1.write('Classificação de Raça/Cor das culfeitas')\n # Criar o gráfico de barras lado a lado\n figbar2 = px.bar(contagem_raca_paleta_cul, x='Raça/paleta', y='Contagem',\n labels={'Raça/paleta': 'Raça/Cor', 'value': 'Contagem'},\n width=450,\n height=450,\n color_discrete_sequence=laranja\n )\n rotulo1 = contagem_raca_paleta_cul['Contagem']\n figbar2.update_traces(text=contagem_raca_paleta_cul['Contagem'], textposition='outside', showlegend=False)\n \n \n # Exibir o gráfico no Streamlit\n graf1.plotly_chart(figbar2, use_container_width=False)\n\n graf2.write('culfeitas que falaram \"Sim\" para a classificação Étnico Racial')\n # Criar o gráfico de barras lado a lado\n figbar3 = px.bar(contagem_autodeclara_sim_cul, x='Raça/paleta', y='Contagem',\n labels={'Raça/paleta': 'Raça/Cor', 'value': 'Contagem'},\n width=450,\n height=450,\n color_discrete_sequence=azul\n )\n figbar3.update_traces(text=contagem_autodeclara_sim_cul['Contagem'], textposition='outside', showlegend=False)\n\n \n # Exibir o gráfico no Streamlit\n graf2.plotly_chart(figbar3, use_container_width=False)\n\n# ----------------------------------------------------------------------\n\nwith tabs[4]:\n colesp1, colesp2 = st.columns([1,2])\n\n with colesp1:\n st.markdown(\"\"\"
\"\"\", unsafe_allow_html=True)\n\n esp_fem = pd.DataFrame(df_esp[df_esp[\"Sexo\"] == \"Feminino\"])\n\n contagem_esp_fem = len(esp_fem)\n\n valores_esp = [contagem_esp_fem, len(df_esp) - contagem_esp_fem]\n rotulos_esp = [\"Feminino\", \"Masespino\"]\n\n st.write(\"Distribuição de Gênero nas espfeituras\")\n card1, card2 = st.columns([1, 2])\n\n card1.metric(rotulos_esp[0], valores_esp[0])\n card2.metric(rotulos_esp[1], valores_esp[1])\n\n figpie1 = px.pie(\n values=valores_esp,\n names=rotulos_esp,\n color_discrete_sequence=paleta,\n width=400,\n hole=0.5\n )\n st.plotly_chart(figpie1, use_container_width=False)\n\n escolaridade_esp_fem = esp_fem.groupby('Escolaridade')['Escolaridade'].count().reset_index(name='Frequência acumulada')\n escolaridade_esp_fem['Percentagem'] = freq_rel(escolaridade_esp_fem['Frequência acumulada'])\n escolaridade_esp_fem = escolaridade_esp_fem.reset_index().drop('index', axis=1).sort_values(by='Percentagem', ascending=False)\n escolaridade_esp_fem['Percentagem'] = escolaridade_esp_fem['Percentagem'].apply(lambda x: f'{x:.2f}%')\n escolaridade_esp_fem = escolaridade_esp_fem.reset_index()\n\n st.write('Frequência do Nível Escolar das espfeitas')\n st.markdown(\"\"\"
\"\"\", unsafe_allow_html=True)\n st.dataframe(escolaridade_esp_fem[['Escolaridade', 'Percentagem']])\n with colesp2: \n colesp = st.columns(2)\n # Agrupando por estados\n uf_esp = esp_fem.groupby('UF').size().reset_index(name='Frequência acumulada')\n uf_esp['Frequência relativa'] = freq_rel(uf_esp['Frequência acumulada'])\n uf_esp = uf_esp.rename(columns={'UF': 'Estados'})\n uf_esp = uf_esp.sort_values(by='Frequência acumulada', ascending=False)\n uf_esp = uf_esp.reset_index()\n uf_esp = uf_esp.drop('index', axis=1)\n\n uf_esp_grafico = uf_esp[['Estados', 'Frequência acumulada']]\n uf_esp_rotulo = uf_esp['Frequência relativa'].apply(lambda x: f'{x:.2f}%')\n\n\n # Agrupando por região\n rg_esp = esp_fem.groupby('Regiao')['Regiao'].count().reset_index(name='Contagem')\n rg_esp['Percentagem'] = freq_rel(rg_esp['Contagem']).apply(lambda x: f'{x:.2f}%')\n rg_esp = rg_esp.reset_index().drop('index', axis=1).sort_values(by='Contagem', ascending=False)\n\n st.write('Frequência de Mulheres espfeitas por Região', size=16)\n rg1, rg2, rg3, rg4, rg5 = st.columns(5)\n\n rg1.metric(rg_esp['Regiao'][0], rg_esp['Percentagem'][0])\n rg2.metric(rg_esp['Regiao'][1], rg_esp['Percentagem'][1])\n rg3.metric(rg_esp['Regiao'][2], rg_esp['Percentagem'][2])\n rg4.metric(rg_esp['Regiao'][3], rg_esp['Percentagem'][3])\n rg5.metric(rg_esp['Regiao'][4], rg_esp['Percentagem'][4])\n st.markdown(\"\"\"
\"\"\", unsafe_allow_html=True)\n\n st.write('Frequência de Mulheres espfeitas por Estado')\n figbar1 = px.bar(uf_esp,\n x='Estados',\n y='Frequência acumulada',\n #labels={'Frequência acumulada': 'Frequência Acumulada'},\n color_discrete_sequence=amarelo,\n width=900,\n height=390,\n )\n # Adicionando rótulos às barras\n figbar1.update_traces(\n text=uf_esp_rotulo, \n textposition='outside', \n )\n\n # Exibir o gráfico no Streamlit\n st.plotly_chart(figbar1, use_container_width=False)\n\n graf1, graf2 = st.columns(2)\n \n # Dados de contagem de raça/paleta\n contagem_raca_paleta_esp = esp_fem['Raça'].value_counts().reset_index()\n contagem_raca_paleta_esp.columns = ['Raça/paleta', 'Contagem']\n\n # Dados de autodeclarações \"Sim\" por raça/paleta\n autodeclara_sim_esp = esp_fem[esp_fem['Autodeclara'] == 'Sim']\n contagem_autodeclara_sim_esp = autodeclara_sim_esp['Raça'].value_counts().reset_index()\n contagem_autodeclara_sim_esp.columns = ['Raça/paleta', 'Contagem']\n\n graf1.write('Classificação de Raça/Cor das espfeitas')\n # Criar o gráfico de barras lado a lado\n figbar2 = px.bar(contagem_raca_paleta_esp, x='Raça/paleta', y='Contagem',\n labels={'Raça/paleta': 'Raça/Cor', 'value': 'Contagem'},\n width=450,\n height=450,\n color_discrete_sequence=laranja\n )\n rotulo1 = contagem_raca_paleta_esp['Contagem']\n figbar2.update_traces(text=contagem_raca_paleta_esp['Contagem'], textposition='outside', showlegend=False)\n \n \n # Exibir o gráfico no Streamlit\n graf1.plotly_chart(figbar2, use_container_width=False)\n\n graf2.write('espfeitas que falaram \"Sim\" para a classificação Étnico Racial')\n # Criar o gráfico de barras lado a lado\n figbar3 = px.bar(contagem_autodeclara_sim_esp, x='Raça/paleta', y='Contagem',\n labels={'Raça/paleta': 'Raça/Cor', 'value': 'Contagem'},\n width=450,\n height=450,\n color_discrete_sequence=azul\n )\n figbar3.update_traces(text=contagem_autodeclara_sim_esp['Contagem'], textposition='outside', showlegend=False)\n\n \n # Exibir o gráfico no Streamlit\n graf2.plotly_chart(figbar3, use_container_width=False)\n\n# ----------------------------------------------------------------------\n\nwith tabs[5]:\n colsau1, colsau2 = st.columns([1,2])\n\n with colsau1:\n st.markdown(\"\"\"
\"\"\", unsafe_allow_html=True)\n\n sau_fem = pd.DataFrame(df_sau[df_sau[\"Sexo\"] == \"Feminino\"])\n\n contagem_sau_fem = len(sau_fem)\n\n valores_sau = [contagem_sau_fem, len(df_sau) - contagem_sau_fem]\n rotulos_sau = [\"Feminino\", \"Massauino\"]\n\n st.write(\"Distribuição de Gênero nas saufeituras\")\n card1, card2 = st.columns([1, 2])\n\n card1.metric(rotulos_sau[0], valores_sau[0])\n card2.metric(rotulos_sau[1], valores_sau[1])\n\n figpie1 = px.pie(\n values=valores_sau,\n names=rotulos_sau,\n color_discrete_sequence=paleta,\n width=400,\n hole=0.5\n )\n st.plotly_chart(figpie1, use_container_width=False)\n\n escolaridade_sau_fem = sau_fem.groupby('Escolaridade')['Escolaridade'].count().reset_index(name='Frequência acumulada')\n escolaridade_sau_fem['Percentagem'] = freq_rel(escolaridade_sau_fem['Frequência acumulada'])\n escolaridade_sau_fem = escolaridade_sau_fem.reset_index().drop('index', axis=1).sort_values(by='Percentagem', ascending=False)\n escolaridade_sau_fem['Percentagem'] = escolaridade_sau_fem['Percentagem'].apply(lambda x: f'{x:.2f}%')\n escolaridade_sau_fem = escolaridade_sau_fem.reset_index()\n\n st.write('Frequência do Nível Escolar das saufeitas')\n st.markdown(\"\"\"
\"\"\", unsafe_allow_html=True)\n st.dataframe(escolaridade_sau_fem[['Escolaridade', 'Percentagem']])\n with colsau2: \n colsau = st.columns(2)\n # Agrupando por estados\n uf_sau = sau_fem.groupby('UF').size().reset_index(name='Frequência acumulada')\n uf_sau['Frequência relativa'] = freq_rel(uf_sau['Frequência acumulada'])\n uf_sau = uf_sau.rename(columns={'UF': 'Estados'})\n uf_sau = uf_sau.sort_values(by='Frequência acumulada', ascending=False)\n uf_sau = uf_sau.reset_index()\n uf_sau = uf_sau.drop('index', axis=1)\n\n uf_sau_grafico = uf_sau[['Estados', 'Frequência acumulada']]\n uf_sau_rotulo = uf_sau['Frequência relativa'].apply(lambda x: f'{x:.2f}%')\n\n\n # Agrupando por região\n rg_sau = sau_fem.groupby('Regiao')['Regiao'].count().reset_index(name='Contagem')\n rg_sau['Percentagem'] = freq_rel(rg_sau['Contagem']).apply(lambda x: f'{x:.2f}%')\n rg_sau = rg_sau.reset_index().drop('index', axis=1).sort_values(by='Contagem', ascending=False)\n\n st.write('Frequência de Mulheres saufeitas por Região', size=16)\n rg1, rg2, rg3, rg4, rg5 = st.columns(5)\n\n rg1.metric(rg_sau['Regiao'][0], rg_sau['Percentagem'][0])\n rg2.metric(rg_sau['Regiao'][1], rg_sau['Percentagem'][1])\n rg3.metric(rg_sau['Regiao'][2], rg_sau['Percentagem'][2])\n rg4.metric(rg_sau['Regiao'][3], rg_sau['Percentagem'][3])\n rg5.metric(rg_sau['Regiao'][4], rg_sau['Percentagem'][4])\n st.markdown(\"\"\"
\"\"\", unsafe_allow_html=True)\n\n st.write('Frequência de Mulheres saufeitas por Estado')\n figbar1 = px.bar(uf_sau,\n x='Estados',\n y='Frequência acumulada',\n #labels={'Frequência acumulada': 'Frequência Acumulada'},\n color_discrete_sequence=amarelo,\n width=900,\n height=390,\n )\n # Adicionando rótulos às barras\n figbar1.update_traces(\n text=uf_sau_rotulo, \n textposition='outside', \n )\n\n # Exibir o gráfico no Streamlit\n st.plotly_chart(figbar1, use_container_width=False)\n\n graf1, graf2 = st.columns(2)\n \n # Dados de contagem de raça/paleta\n contagem_raca_paleta_sau = sau_fem['Raça'].value_counts().reset_index()\n contagem_raca_paleta_sau.columns = ['Raça/paleta', 'Contagem']\n\n # Dados de autodeclarações \"Sim\" por raça/paleta\n autodeclara_sim_sau = sau_fem[sau_fem['Autodeclara'] == 'Sim']\n contagem_autodeclara_sim_sau = autodeclara_sim_sau['Raça'].value_counts().reset_index()\n contagem_autodeclara_sim_sau.columns = ['Raça/paleta', 'Contagem']\n\n graf1.write('Classificação de Raça/Cor das saufeitas')\n # Criar o gráfico de barras lado a lado\n figbar2 = px.bar(contagem_raca_paleta_sau, x='Raça/paleta', y='Contagem',\n labels={'Raça/paleta': 'Raça/Cor', 'value': 'Contagem'},\n width=450,\n height=450,\n color_discrete_sequence=laranja\n )\n rotulo1 = contagem_raca_paleta_sau['Contagem']\n figbar2.update_traces(text=contagem_raca_paleta_sau['Contagem'], textposition='outside', showlegend=False)\n \n \n # Exibir o gráfico no Streamlit\n graf1.plotly_chart(figbar2, use_container_width=False)\n\n graf2.write('saufeitas que falaram \"Sim\" para a classificação Étnico Racial')\n # Criar o gráfico de barras lado a lado\n figbar3 = px.bar(contagem_autodeclara_sim_sau, x='Raça/paleta', y='Contagem',\n labels={'Raça/paleta': 'Raça/Cor', 'value': 'Contagem'},\n width=450,\n height=450,\n color_discrete_sequence=azul\n )\n figbar3.update_traces(text=contagem_autodeclara_sim_sau['Contagem'], textposition='outside', showlegend=False)\n\n \n # Exibir o gráfico no Streamlit\n graf2.plotly_chart(figbar3, use_container_width=False)\n\n# ----------------------------------------------------------------------", "repo_name": "mayalajesus/datascience", "sub_path": "analise_basemunic/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 39734, "program_lang": "python", "lang": "pt", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "85", "api": [{"api_name": "streamlit.set_page_config", "line_number": 9, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 22, "usage_type": "call"}, {"api_name": "pandas.ExcelFile", "line_number": 27, "usage_type": "call"}, {"api_name": "pandas.read_excel", "line_number": 32, "usage_type": "call"}, {"api_name": "pandas.read_excel", "line_number": 33, "usage_type": "call"}, {"api_name": "pandas.read_excel", "line_number": 34, "usage_type": "call"}, {"api_name": "pandas.read_excel", "line_number": 35, "usage_type": "call"}, {"api_name": "pandas.read_excel", "line_number": 36, "usage_type": "call"}, {"api_name": "streamlit.title", "line_number": 153, "usage_type": "call"}, {"api_name": "streamlit.tabs", "line_number": 156, "usage_type": "call"}, {"api_name": "streamlit.markdown", "line_number": 161, "usage_type": "call"}, {"api_name": "streamlit.columns", "line_number": 163, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 168, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 169, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 175, "usage_type": 
"call"}, {"api_name": "pandas.DataFrame", "line_number": 176, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 182, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 183, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 189, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 190, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 196, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 197, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 204, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 208, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 215, "usage_type": "call"}, {"api_name": "plotly.express.pie", "line_number": 216, "usage_type": "call"}, {"api_name": "plotly.express", "line_number": 216, "usage_type": "name"}, {"api_name": "streamlit.plotly_chart", "line_number": 225, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 228, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 229, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 230, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 231, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 232, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 233, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 262, "usage_type": "call"}, {"api_name": "plotly.graph_objects.Figure", "line_number": 263, "usage_type": "call"}, {"api_name": "plotly.graph_objects", "line_number": 263, "usage_type": "name"}, {"api_name": "plotly.graph_objects.Bar", "line_number": 271, "usage_type": "call"}, {"api_name": "plotly.graph_objects", "line_number": 271, "usage_type": "name"}, {"api_name": "streamlit.plotly_chart", "line_number": 294, "usage_type": "call"}, {"api_name": "pandas.NA", "line_number": 297, "usage_type": "attribute"}, {"api_name": "streamlit.columns", "line_number": 310, "usage_type": "call"}, {"api_name": "plotly.figure_factory.create_distplot", "line_number": 326, "usage_type": "call"}, {"api_name": "plotly.figure_factory", "line_number": 326, "usage_type": "name"}, {"api_name": "streamlit.write", "line_number": 338, "usage_type": "call"}, {"api_name": "streamlit.plotly_chart", "line_number": 339, "usage_type": "call"}, {"api_name": "streamlit.columns", "line_number": 344, "usage_type": "call"}, {"api_name": "streamlit.markdown", "line_number": 347, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 349, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 356, "usage_type": "call"}, {"api_name": "streamlit.columns", "line_number": 357, "usage_type": "call"}, {"api_name": "plotly.express.pie", "line_number": 362, "usage_type": "call"}, {"api_name": "plotly.express", "line_number": 362, "usage_type": "name"}, {"api_name": "streamlit.plotly_chart", "line_number": 369, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 377, "usage_type": "call"}, {"api_name": "streamlit.markdown", "line_number": 378, "usage_type": "call"}, {"api_name": "streamlit.dataframe", "line_number": 379, "usage_type": "call"}, {"api_name": "streamlit.columns", "line_number": 381, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 399, "usage_type": "call"}, {"api_name": "streamlit.columns", "line_number": 400, "usage_type": "call"}, {"api_name": "streamlit.markdown", "line_number": 407, 
"usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 409, "usage_type": "call"}, {"api_name": "plotly.express.bar", "line_number": 410, "usage_type": "call"}, {"api_name": "plotly.express", "line_number": 410, "usage_type": "name"}, {"api_name": "streamlit.plotly_chart", "line_number": 425, "usage_type": "call"}, {"api_name": "streamlit.columns", "line_number": 427, "usage_type": "call"}, {"api_name": "plotly.express.bar", "line_number": 440, "usage_type": "call"}, {"api_name": "plotly.express", "line_number": 440, "usage_type": "name"}, {"api_name": "plotly.express.bar", "line_number": 455, "usage_type": "call"}, {"api_name": "plotly.express", "line_number": 455, "usage_type": "name"}, {"api_name": "streamlit.columns", "line_number": 470, "usage_type": "call"}, {"api_name": "streamlit.markdown", "line_number": 473, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 475, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 482, "usage_type": "call"}, {"api_name": "streamlit.columns", "line_number": 483, "usage_type": "call"}, {"api_name": "plotly.express.pie", "line_number": 488, "usage_type": "call"}, {"api_name": "plotly.express", "line_number": 488, "usage_type": "name"}, {"api_name": "streamlit.plotly_chart", "line_number": 495, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 503, "usage_type": "call"}, {"api_name": "streamlit.markdown", "line_number": 504, "usage_type": "call"}, {"api_name": "streamlit.dataframe", "line_number": 505, "usage_type": "call"}, {"api_name": "streamlit.columns", "line_number": 507, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 525, "usage_type": "call"}, {"api_name": "streamlit.columns", "line_number": 526, "usage_type": "call"}, {"api_name": "streamlit.markdown", "line_number": 533, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 535, "usage_type": "call"}, {"api_name": "plotly.express.bar", "line_number": 536, "usage_type": "call"}, {"api_name": "plotly.express", "line_number": 536, "usage_type": "name"}, {"api_name": "streamlit.plotly_chart", "line_number": 551, "usage_type": "call"}, {"api_name": "streamlit.columns", "line_number": 553, "usage_type": "call"}, {"api_name": "plotly.express.bar", "line_number": 566, "usage_type": "call"}, {"api_name": "plotly.express", "line_number": 566, "usage_type": "name"}, {"api_name": "plotly.express.bar", "line_number": 581, "usage_type": "call"}, {"api_name": "plotly.express", "line_number": 581, "usage_type": "name"}, {"api_name": "streamlit.columns", "line_number": 596, "usage_type": "call"}, {"api_name": "streamlit.markdown", "line_number": 599, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 601, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 608, "usage_type": "call"}, {"api_name": "streamlit.columns", "line_number": 609, "usage_type": "call"}, {"api_name": "plotly.express.pie", "line_number": 614, "usage_type": "call"}, {"api_name": "plotly.express", "line_number": 614, "usage_type": "name"}, {"api_name": "streamlit.plotly_chart", "line_number": 621, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 629, "usage_type": "call"}, {"api_name": "streamlit.markdown", "line_number": 630, "usage_type": "call"}, {"api_name": "streamlit.dataframe", "line_number": 631, "usage_type": "call"}, {"api_name": "streamlit.columns", "line_number": 633, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 651, "usage_type": 
"call"}, {"api_name": "streamlit.columns", "line_number": 652, "usage_type": "call"}, {"api_name": "streamlit.markdown", "line_number": 659, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 661, "usage_type": "call"}, {"api_name": "plotly.express.bar", "line_number": 662, "usage_type": "call"}, {"api_name": "plotly.express", "line_number": 662, "usage_type": "name"}, {"api_name": "streamlit.plotly_chart", "line_number": 677, "usage_type": "call"}, {"api_name": "streamlit.columns", "line_number": 679, "usage_type": "call"}, {"api_name": "plotly.express.bar", "line_number": 692, "usage_type": "call"}, {"api_name": "plotly.express", "line_number": 692, "usage_type": "name"}, {"api_name": "plotly.express.bar", "line_number": 707, "usage_type": "call"}, {"api_name": "plotly.express", "line_number": 707, "usage_type": "name"}, {"api_name": "streamlit.columns", "line_number": 722, "usage_type": "call"}, {"api_name": "streamlit.markdown", "line_number": 725, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 727, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 734, "usage_type": "call"}, {"api_name": "streamlit.columns", "line_number": 735, "usage_type": "call"}, {"api_name": "plotly.express.pie", "line_number": 740, "usage_type": "call"}, {"api_name": "plotly.express", "line_number": 740, "usage_type": "name"}, {"api_name": "streamlit.plotly_chart", "line_number": 747, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 755, "usage_type": "call"}, {"api_name": "streamlit.markdown", "line_number": 756, "usage_type": "call"}, {"api_name": "streamlit.dataframe", "line_number": 757, "usage_type": "call"}, {"api_name": "streamlit.columns", "line_number": 759, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 777, "usage_type": "call"}, {"api_name": "streamlit.columns", "line_number": 778, "usage_type": "call"}, {"api_name": "streamlit.markdown", "line_number": 785, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 787, "usage_type": "call"}, {"api_name": "plotly.express.bar", "line_number": 788, "usage_type": "call"}, {"api_name": "plotly.express", "line_number": 788, "usage_type": "name"}, {"api_name": "streamlit.plotly_chart", "line_number": 803, "usage_type": "call"}, {"api_name": "streamlit.columns", "line_number": 805, "usage_type": "call"}, {"api_name": "plotly.express.bar", "line_number": 818, "usage_type": "call"}, {"api_name": "plotly.express", "line_number": 818, "usage_type": "name"}, {"api_name": "plotly.express.bar", "line_number": 833, "usage_type": "call"}, {"api_name": "plotly.express", "line_number": 833, "usage_type": "name"}, {"api_name": "streamlit.columns", "line_number": 848, "usage_type": "call"}, {"api_name": "streamlit.markdown", "line_number": 851, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 853, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 860, "usage_type": "call"}, {"api_name": "streamlit.columns", "line_number": 861, "usage_type": "call"}, {"api_name": "plotly.express.pie", "line_number": 866, "usage_type": "call"}, {"api_name": "plotly.express", "line_number": 866, "usage_type": "name"}, {"api_name": "streamlit.plotly_chart", "line_number": 873, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 881, "usage_type": "call"}, {"api_name": "streamlit.markdown", "line_number": 882, "usage_type": "call"}, {"api_name": "streamlit.dataframe", "line_number": 883, "usage_type": "call"}, 
{"api_name": "streamlit.columns", "line_number": 885, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 903, "usage_type": "call"}, {"api_name": "streamlit.columns", "line_number": 904, "usage_type": "call"}, {"api_name": "streamlit.markdown", "line_number": 911, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 913, "usage_type": "call"}, {"api_name": "plotly.express.bar", "line_number": 914, "usage_type": "call"}, {"api_name": "plotly.express", "line_number": 914, "usage_type": "name"}, {"api_name": "streamlit.plotly_chart", "line_number": 929, "usage_type": "call"}, {"api_name": "streamlit.columns", "line_number": 931, "usage_type": "call"}, {"api_name": "plotly.express.bar", "line_number": 944, "usage_type": "call"}, {"api_name": "plotly.express", "line_number": 944, "usage_type": "name"}, {"api_name": "plotly.express.bar", "line_number": 959, "usage_type": "call"}, {"api_name": "plotly.express", "line_number": 959, "usage_type": "name"}]}
+{"seq_id": "18027225230", "text": "from sympy.core.singleton import S\n\nfrom sympy.core.numbers import pi\n\nfrom sympy.physics.units import DimensionSystem, hertz, kilogram\nfrom sympy.physics.units.definitions import (\n G, Hz, J, N, Pa, W, c, g, kg, m, s, meter, gram, second, newton,\n joule, watt, pascal)\nfrom sympy.physics.units.definitions.dimension_definitions import (\n acceleration, action, energy, force, frequency, momentum,\n power, pressure, velocity, length, mass, time)\nfrom sympy.physics.units.prefixes import PREFIXES, prefix_unit\nfrom sympy.physics.units.prefixes import (\n kibi, mebi, gibi, tebi, pebi, exbi\n)\nfrom sympy.physics.units.definitions import (\n cd, K, coulomb, volt, ohm, siemens, farad, henry, tesla, weber, dioptre,\n lux, katal, gray, becquerel, inch, hectare, liter, julian_year,\n gravitational_constant, speed_of_light, elementary_charge, planck, hbar,\n electronvolt, avogadro_number, avogadro_constant, boltzmann_constant,\n stefan_boltzmann_constant, atomic_mass_constant, molar_gas_constant,\n faraday_constant, josephson_constant, von_klitzing_constant,\n acceleration_due_to_gravity, magnetic_constant, vacuum_permittivity,\n vacuum_impedance, coulomb_constant, atmosphere, bar, pound, psi, mmHg,\n milli_mass_unit, quart, lightyear, astronomical_unit, planck_mass,\n planck_time, planck_temperature, planck_length, planck_charge,\n planck_area, planck_volume, planck_momentum, planck_energy, planck_force,\n planck_power, planck_density, planck_energy_density, planck_intensity,\n planck_angular_frequency, planck_pressure, planck_current, planck_voltage,\n planck_impedance, planck_acceleration, bit, byte, kibibyte, mebibyte,\n gibibyte, tebibyte, pebibyte, exbibyte, curie, rutherford, radian, degree,\n steradian, angular_mil, atomic_mass_unit, gee, kPa, ampere, u0, kelvin,\n mol, mole, candela, electric_constant, boltzmann, angstrom\n)\n\n\ndimsys_length_weight_time = DimensionSystem([\n # Dimensional dependencies for MKS base dimensions\n length,\n mass,\n time,\n], dimensional_dependencies={\n # Dimensional dependencies for derived dimensions\n \"velocity\": {\"length\": 1, \"time\": -1},\n \"acceleration\": {\"length\": 1, \"time\": -2},\n \"momentum\": {\"mass\": 1, \"length\": 1, \"time\": -1},\n \"force\": {\"mass\": 1, \"length\": 1, \"time\": -2},\n \"energy\": {\"mass\": 1, \"length\": 2, \"time\": -2},\n \"power\": {\"length\": 2, \"mass\": 1, \"time\": -3},\n \"pressure\": {\"mass\": 1, \"length\": -1, \"time\": -2},\n \"frequency\": {\"time\": -1},\n \"action\": {\"length\": 2, \"mass\": 1, \"time\": -1},\n \"area\": {\"length\": 2},\n \"volume\": {\"length\": 3},\n})\n\n\nOne = S.One\n\n\n# Base units:\ndimsys_length_weight_time.set_quantity_dimension(meter, length)\ndimsys_length_weight_time.set_quantity_scale_factor(meter, One)\n\n# gram; used to define its prefixed units\ndimsys_length_weight_time.set_quantity_dimension(gram, mass)\ndimsys_length_weight_time.set_quantity_scale_factor(gram, One)\n\ndimsys_length_weight_time.set_quantity_dimension(second, time)\ndimsys_length_weight_time.set_quantity_scale_factor(second, One)\n\n# derived units\n\ndimsys_length_weight_time.set_quantity_dimension(newton, force)\ndimsys_length_weight_time.set_quantity_scale_factor(newton, kilogram*meter/second**2)\n\ndimsys_length_weight_time.set_quantity_dimension(joule, energy)\ndimsys_length_weight_time.set_quantity_scale_factor(joule, newton*meter)\n\ndimsys_length_weight_time.set_quantity_dimension(watt, 
power)\ndimsys_length_weight_time.set_quantity_scale_factor(watt, joule/second)\n\ndimsys_length_weight_time.set_quantity_dimension(pascal, pressure)\ndimsys_length_weight_time.set_quantity_scale_factor(pascal, newton/meter**2)\n\ndimsys_length_weight_time.set_quantity_dimension(hertz, frequency)\ndimsys_length_weight_time.set_quantity_scale_factor(hertz, One)\n\n# Other derived units:\n\ndimsys_length_weight_time.set_quantity_dimension(dioptre, 1 / length)\ndimsys_length_weight_time.set_quantity_scale_factor(dioptre, 1/meter)\n\n# Common volume and area units\n\ndimsys_length_weight_time.set_quantity_dimension(hectare, length**2)\ndimsys_length_weight_time.set_quantity_scale_factor(hectare, (meter**2)*(10000))\n\ndimsys_length_weight_time.set_quantity_dimension(liter, length**3)\ndimsys_length_weight_time.set_quantity_scale_factor(liter, meter**3/1000)\n\n\n# Newton constant\n# REF: NIST SP 959 (June 2019)\n\ndimsys_length_weight_time.set_quantity_dimension(gravitational_constant, length ** 3 * mass ** -1 * time ** -2)\ndimsys_length_weight_time.set_quantity_scale_factor(gravitational_constant, 6.67430e-11*m**3/(kg*s**2))\n\n# speed of light\n\ndimsys_length_weight_time.set_quantity_dimension(speed_of_light, velocity)\ndimsys_length_weight_time.set_quantity_scale_factor(speed_of_light, 299792458*meter/second)\n\n\n# Planck constant\n# REF: NIST SP 959 (June 2019)\n\ndimsys_length_weight_time.set_quantity_dimension(planck, action)\ndimsys_length_weight_time.set_quantity_scale_factor(planck, 6.62607015e-34*joule*second)\n\n# Reduced Planck constant\n# REF: NIST SP 959 (June 2019)\n\ndimsys_length_weight_time.set_quantity_dimension(hbar, action)\ndimsys_length_weight_time.set_quantity_scale_factor(hbar, planck / (2 * pi))\n\n\n__all__ = [\n 'mmHg', 'atmosphere', 'newton', 'meter', 'vacuum_permittivity', 'pascal',\n 'magnetic_constant', 'angular_mil', 'julian_year', 'weber', 'exbibyte',\n 'liter', 'molar_gas_constant', 'faraday_constant', 'avogadro_constant',\n 'planck_momentum', 'planck_density', 'gee', 'mol', 'bit', 'gray', 'kibi',\n 'bar', 'curie', 'prefix_unit', 'PREFIXES', 'planck_time', 'gram',\n 'candela', 'force', 'planck_intensity', 'energy', 'becquerel',\n 'planck_acceleration', 'speed_of_light', 'dioptre', 'second', 'frequency',\n 'Hz', 'power', 'lux', 'planck_current', 'momentum', 'tebibyte',\n 'planck_power', 'degree', 'mebi', 'K', 'planck_volume',\n 'quart', 'pressure', 'W', 'joule', 'boltzmann_constant', 'c', 'g',\n 'planck_force', 'exbi', 's', 'watt', 'action', 'hbar', 'gibibyte',\n 'DimensionSystem', 'cd', 'volt', 'planck_charge', 'angstrom',\n 'dimsys_length_weight_time', 'pebi', 'vacuum_impedance', 'planck',\n 'farad', 'gravitational_constant', 'u0', 'hertz', 'tesla', 'steradian',\n 'josephson_constant', 'planck_area', 'stefan_boltzmann_constant',\n 'astronomical_unit', 'J', 'N', 'planck_voltage', 'planck_energy',\n 'atomic_mass_constant', 'rutherford', 'elementary_charge', 'Pa',\n 'planck_mass', 'henry', 'planck_angular_frequency', 'ohm', 'pound',\n 'planck_pressure', 'G', 'avogadro_number', 'psi', 'von_klitzing_constant',\n 'planck_length', 'radian', 'mole', 'acceleration',\n 'planck_energy_density', 'mebibyte', 'length',\n 'acceleration_due_to_gravity', 'planck_temperature', 'tebi', 'inch',\n 'electronvolt', 'coulomb_constant', 'kelvin', 'kPa', 'boltzmann',\n 'milli_mass_unit', 'gibi', 'planck_impedance', 'electric_constant', 'kg',\n 'coulomb', 'siemens', 'byte', 'atomic_mass_unit', 'm', 'kibibyte',\n 'kilogram', 'lightyear', 'mass', 'time', 'pebibyte', 'velocity',\n 
'ampere', 'katal',\n]\n", "repo_name": "sympy/sympy", "sub_path": "sympy/physics/units/systems/length_weight_time.py", "file_name": "length_weight_time.py", "file_ext": "py", "file_size_in_byte": 7004, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 11561, "dataset": "github-code", "pt": "73", "api": [{"api_name": "sympy.physics.units.DimensionSystem", "line_number": 37, "usage_type": "call"}, {"api_name": "sympy.physics.units.definitions.dimension_definitions.length", "line_number": 39, "usage_type": "name"}, {"api_name": "sympy.physics.units.definitions.dimension_definitions.mass", "line_number": 40, "usage_type": "name"}, {"api_name": "sympy.physics.units.definitions.dimension_definitions.time", "line_number": 41, "usage_type": "name"}, {"api_name": "sympy.core.singleton.S.One", "line_number": 58, "usage_type": "attribute"}, {"api_name": "sympy.core.singleton.S", "line_number": 58, "usage_type": "name"}, {"api_name": "sympy.physics.units.definitions.meter", "line_number": 62, "usage_type": "argument"}, {"api_name": "sympy.physics.units.definitions.dimension_definitions.length", "line_number": 62, "usage_type": "argument"}, {"api_name": "sympy.physics.units.definitions.meter", "line_number": 63, "usage_type": "argument"}, {"api_name": "sympy.physics.units.definitions.gram", "line_number": 66, "usage_type": "argument"}, {"api_name": "sympy.physics.units.definitions.dimension_definitions.mass", "line_number": 66, "usage_type": "argument"}, {"api_name": "sympy.physics.units.definitions.gram", "line_number": 67, "usage_type": "argument"}, {"api_name": "sympy.physics.units.definitions.second", "line_number": 69, "usage_type": "argument"}, {"api_name": "sympy.physics.units.definitions.dimension_definitions.time", "line_number": 69, "usage_type": "argument"}, {"api_name": "sympy.physics.units.definitions.second", "line_number": 70, "usage_type": "argument"}, {"api_name": "sympy.physics.units.definitions.newton", "line_number": 74, "usage_type": "argument"}, {"api_name": "sympy.physics.units.definitions.dimension_definitions.force", "line_number": 74, "usage_type": "argument"}, {"api_name": "sympy.physics.units.definitions.newton", "line_number": 75, "usage_type": "argument"}, {"api_name": "sympy.physics.units.kilogram", "line_number": 75, "usage_type": "name"}, {"api_name": "sympy.physics.units.definitions.meter", "line_number": 75, "usage_type": "name"}, {"api_name": "sympy.physics.units.definitions.second", "line_number": 75, "usage_type": "name"}, {"api_name": "sympy.physics.units.definitions.joule", "line_number": 77, "usage_type": "argument"}, {"api_name": "sympy.physics.units.definitions.dimension_definitions.energy", "line_number": 77, "usage_type": "argument"}, {"api_name": "sympy.physics.units.definitions.joule", "line_number": 78, "usage_type": "argument"}, {"api_name": "sympy.physics.units.definitions.newton", "line_number": 78, "usage_type": "name"}, {"api_name": "sympy.physics.units.definitions.meter", "line_number": 78, "usage_type": "name"}, {"api_name": "sympy.physics.units.definitions.watt", "line_number": 80, "usage_type": "argument"}, {"api_name": "sympy.physics.units.definitions.dimension_definitions.power", "line_number": 80, "usage_type": "argument"}, {"api_name": "sympy.physics.units.definitions.watt", "line_number": 81, "usage_type": "argument"}, {"api_name": "sympy.physics.units.definitions.joule", "line_number": 81, "usage_type": "name"}, {"api_name": "sympy.physics.units.definitions.second", "line_number": 81, "usage_type": "name"}, {"api_name": 
"sympy.physics.units.definitions.pascal", "line_number": 83, "usage_type": "argument"}, {"api_name": "sympy.physics.units.definitions.dimension_definitions.pressure", "line_number": 83, "usage_type": "argument"}, {"api_name": "sympy.physics.units.definitions.pascal", "line_number": 84, "usage_type": "argument"}, {"api_name": "sympy.physics.units.definitions.newton", "line_number": 84, "usage_type": "name"}, {"api_name": "sympy.physics.units.definitions.meter", "line_number": 84, "usage_type": "name"}, {"api_name": "sympy.physics.units.hertz", "line_number": 86, "usage_type": "argument"}, {"api_name": "sympy.physics.units.definitions.dimension_definitions.frequency", "line_number": 86, "usage_type": "argument"}, {"api_name": "sympy.physics.units.hertz", "line_number": 87, "usage_type": "argument"}, {"api_name": "sympy.physics.units.definitions.dioptre", "line_number": 91, "usage_type": "argument"}, {"api_name": "sympy.physics.units.definitions.dimension_definitions.length", "line_number": 91, "usage_type": "name"}, {"api_name": "sympy.physics.units.definitions.dioptre", "line_number": 92, "usage_type": "argument"}, {"api_name": "sympy.physics.units.definitions.meter", "line_number": 92, "usage_type": "name"}, {"api_name": "sympy.physics.units.definitions.hectare", "line_number": 96, "usage_type": "argument"}, {"api_name": "sympy.physics.units.definitions.dimension_definitions.length", "line_number": 96, "usage_type": "name"}, {"api_name": "sympy.physics.units.definitions.hectare", "line_number": 97, "usage_type": "argument"}, {"api_name": "sympy.physics.units.definitions.meter", "line_number": 97, "usage_type": "name"}, {"api_name": "sympy.physics.units.definitions.liter", "line_number": 99, "usage_type": "argument"}, {"api_name": "sympy.physics.units.definitions.dimension_definitions.length", "line_number": 99, "usage_type": "name"}, {"api_name": "sympy.physics.units.definitions.liter", "line_number": 100, "usage_type": "argument"}, {"api_name": "sympy.physics.units.definitions.meter", "line_number": 100, "usage_type": "name"}, {"api_name": "sympy.physics.units.definitions.gravitational_constant", "line_number": 106, "usage_type": "argument"}, {"api_name": "sympy.physics.units.definitions.dimension_definitions.length", "line_number": 106, "usage_type": "name"}, {"api_name": "sympy.physics.units.definitions.dimension_definitions.mass", "line_number": 106, "usage_type": "name"}, {"api_name": "sympy.physics.units.definitions.dimension_definitions.time", "line_number": 106, "usage_type": "name"}, {"api_name": "sympy.physics.units.definitions.gravitational_constant", "line_number": 107, "usage_type": "argument"}, {"api_name": "sympy.physics.units.definitions.m", "line_number": 107, "usage_type": "name"}, {"api_name": "sympy.physics.units.definitions.kg", "line_number": 107, "usage_type": "name"}, {"api_name": "sympy.physics.units.definitions.s", "line_number": 107, "usage_type": "name"}, {"api_name": "sympy.physics.units.definitions.speed_of_light", "line_number": 111, "usage_type": "argument"}, {"api_name": "sympy.physics.units.definitions.dimension_definitions.velocity", "line_number": 111, "usage_type": "argument"}, {"api_name": "sympy.physics.units.definitions.speed_of_light", "line_number": 112, "usage_type": "argument"}, {"api_name": "sympy.physics.units.definitions.meter", "line_number": 112, "usage_type": "name"}, {"api_name": "sympy.physics.units.definitions.second", "line_number": 112, "usage_type": "name"}, {"api_name": "sympy.physics.units.definitions.planck", "line_number": 118, 
"usage_type": "argument"}, {"api_name": "sympy.physics.units.definitions.dimension_definitions.action", "line_number": 118, "usage_type": "argument"}, {"api_name": "sympy.physics.units.definitions.planck", "line_number": 119, "usage_type": "argument"}, {"api_name": "sympy.physics.units.definitions.joule", "line_number": 119, "usage_type": "name"}, {"api_name": "sympy.physics.units.definitions.second", "line_number": 119, "usage_type": "name"}, {"api_name": "sympy.physics.units.definitions.hbar", "line_number": 124, "usage_type": "argument"}, {"api_name": "sympy.physics.units.definitions.dimension_definitions.action", "line_number": 124, "usage_type": "argument"}, {"api_name": "sympy.physics.units.definitions.hbar", "line_number": 125, "usage_type": "argument"}, {"api_name": "sympy.physics.units.definitions.planck", "line_number": 125, "usage_type": "name"}, {"api_name": "sympy.core.numbers.pi", "line_number": 125, "usage_type": "name"}]}
+{"seq_id": "21343096770", "text": "# -*- coding: utf-8 -*-\nimport pandas as pd\nfrom rake_nltk import Rake\nfrom textblob import TextBlob\nimport profanity_check\nfrom nltk.corpus import stopwords\nfrom nltk.stem import PorterStemmer\nfrom nltk import sent_tokenize\nimport re\n\nclass Analytics(object):\n \n def __init__(self):\n pass\n \n def merge_numbers_intext(self, text):\n '''\n merge adjacent numbers occureence into the text which might got \n splited coz of punctuations presence eg. 1,200,345 -> 1 200 345 \n '''\n word_list = text.split()\n new_word_list = []\n number = \"\"\n for word in word_list:\n if word.isnumeric():\n number+=word\n else:\n if len(number):\n new_word_list.append(number)\n number=\"\"\n new_word_list.append(word)\n return \" \".join(new_word_list)\n \n \n #def get_factual_info_0(text):\n # labels=[\"numbers\",\"data\",\"death\",\"recovery\",\"count\",\"discharged\",\"cases\",\"rising\",\"droping\"]\n # words = stopwords.words(\"english\")+[\"covid\",\"corona\",\"covid19\",\"19\"]\n # stemmer = PorterStemmer()\n # cleaning_lambda = lambda x: \" \".join([stemmer.stem(i) \n # for i in re.sub(\"[^a-zA-Z0-9]\", \" \", x.lower()).split()\n # if i not in words])\n # cleaned_text = cleaning_lambda(text)\n # cleaned_text = merge_numbers_intext(cleaned_text)\n # cleaned_labels = [cleaning_lambda(label) for label in labels]\n # numb_exists = re.findall(\"\\d+\",cleaned_text) # count all the numbers fro the text\n # labels_exists = re.findall(r\"({})\".format(\"|\".join(cleaned_labels)),cleaned_text) # getting lables count which might indicate factual information\n # numb_cnt = len(numb_exists)\n # labels_cnt = len(labels_exists)\n # word_cnt = len(cleaned_text.split())\n # return (numb_cnt+labels_cnt)/word_cnt if word_cnt else 0\n \n def get_factual_info(self, text):\n labels=[\"numbers\",\"data\",\"death\",\"recovery\",\"count\",\"discharged\",\"cases\",\"rising\",\"droping\"]\n words = stopwords.words(\"english\")+[\"covid\",\"corona\",\"covid19\",\"19\"]\n sents = sent_tokenize(text)\n stemmer = PorterStemmer()\n cleaning_lambda = lambda x: \" \".join([stemmer.stem(i)\n for i in re.sub(\"[^a-zA-Z0-9]\", \" \", x.lower()).split()\n if i not in words])\n \n cleaned_labels = [stemmer.stem(label.lower()) for label in labels]\n factual_info = 0\n for sent in sents:\n cleaned_text = cleaning_lambda(sent)\n cleaned_text = self.merge_numbers_intext(cleaned_text)\n numb_exists = re.findall(\"\\d+\",cleaned_text) # count all the numbers fro the text\n labels_exists = re.findall(r\"({})\".format(\"|\".join(cleaned_labels)),cleaned_text) # getting lables count which might indicate factual information\n if numb_exists or labels_exists:\n factual_info+=1\n return (factual_info/len(sents))\n \n # KeyWords\n def get_keywords(self, text):\n r = Rake(max_length = 1)\n r.extract_keywords_from_text(text)\n return r.get_ranked_phrases_with_scores()[:20]\n \n # KeyPhrases\n def get_keyphrases(self, text):\n r = Rake(min_length = 2, max_length = 10)\n r.extract_keywords_from_text(text)\n return r.get_ranked_phrases_with_scores()[:5]\n \n # Labeles\n def get_labels(self, text):\n labels_list = {\n \"Vaccine\":[\"vaccine\"],\n \"Lockdown\":[\"lockdown\"],\n \"Safety measures\": [\"precaution\", \"social distancing\", \"washing hands\", \"safety measures\", \"ppe\", \"mask\",\"quarantine\"], \n \"Travel\":[\"flight\", \"travel\", \"train\", \"transport\", \"visa\",\"departure\",\"arrival\"],\n \"Testing\":[\"rt-pcr\", \"pcr\", \"antigen\", \"antibody test\", \"serology test\", 
\"diagnostic\",\"rtpcr\"],\n \"Official announcements/Rules and regulations\":[\"official\", \"rules\", \"regulation\", \"announced\", \"announcement\", \"government\"]}\n \n stemmer = PorterStemmer()\n words = stopwords.words(\"english\")\n cleaning_lambda = lambda x: \" \".join([stemmer.stem(i) \n for i in re.sub(\"[^a-zA-Z]\", \" \", x.lower()).split() \n if i not in words])\n cleaned_text = cleaning_lambda(text)\n cleaned_labels = {key:[cleaning_lambda(word) for word in values] \n for key, values in labels_list.items()} \n labels_count = {key:\n sum([cleaned_text.count(value) for value in values]) \n for key, values in cleaned_labels.items()}\n \n return labels_count\n \n def get_info(self, df):\n new_df = df.copy()\n \n # Polarity\n new_df['polarity'] = new_df['content'].apply(lambda x : \n TextBlob(str(x).lower().replace(\"positive\",\"infected\")).sentiment.polarity)\n print(\"Collected Polarity Information\")\n \n # Subjectivity\n new_df['subjectivity'] = new_df['content'].apply(lambda x : \n TextBlob(str(x).lower().replace(\"positive\",\"infected\")).sentiment.subjectivity)\n print(\"Collected Subjectivity information\")\n \n # profanity\n new_df['profanity'] = new_df['content'].apply(lambda x : \n round(profanity_check.predict_prob([str(x)])[0],2))\n print(\"Collected Profanity information\")\n\n \n # KeyPhrases\n new_df['keywords'] = new_df['content'].apply(self.get_keywords)\n print(\"Collected Keywords\")\n \n # KeyPhrases\n new_df['keyphrases'] = new_df['content'].apply(self.get_keyphrases)\n print(\"Colected KeyPhrases\")\n \n # Factual Info\n new_df[\"factual_info\"] = new_df['content'].apply(self.get_factual_info)\n print(\"Collected Numberic Info\")\n \n # Labels information\n new_df[\"labels_info\"] = new_df['content'].apply(self.get_labels)\n print(\"Collected content info\")\n \n return new_df\n\n\n\n", "repo_name": "mayurchhabra89/webscraper", "sub_path": "flaskr/analytics.py", "file_name": "analytics.py", "file_ext": "py", "file_size_in_byte": 6070, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "73", "api": [{"api_name": "nltk.corpus.stopwords.words", "line_number": 54, "usage_type": "call"}, {"api_name": "nltk.corpus.stopwords", "line_number": 54, "usage_type": "name"}, {"api_name": "nltk.sent_tokenize", "line_number": 55, "usage_type": "call"}, {"api_name": "nltk.stem.PorterStemmer", "line_number": 56, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 58, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 66, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 67, "usage_type": "call"}, {"api_name": "rake_nltk.Rake", "line_number": 74, "usage_type": "call"}, {"api_name": "rake_nltk.Rake", "line_number": 80, "usage_type": "call"}, {"api_name": "nltk.stem.PorterStemmer", "line_number": 94, "usage_type": "call"}, {"api_name": "nltk.corpus.stopwords.words", "line_number": 95, "usage_type": "call"}, {"api_name": "nltk.corpus.stopwords", "line_number": 95, "usage_type": "name"}, {"api_name": "re.sub", "line_number": 97, "usage_type": "call"}, {"api_name": "textblob.TextBlob", "line_number": 113, "usage_type": "call"}, {"api_name": "textblob.TextBlob", "line_number": 118, "usage_type": "call"}, {"api_name": "profanity_check.predict_prob", "line_number": 123, "usage_type": "call"}]}
+{"seq_id": "74592629354", "text": "import dash\r\nimport dash_core_components as dcc\r\nimport dash_html_components as html\r\nfrom dash.dependencies import Input, Output\r\nimport numpy as np\r\nimport pandas as pd\r\nimport plotly.graph_objs as go\r\nimport plotly.express as px\r\nfrom plotly.subplots import make_subplots\r\nfrom plotly import tools\r\n\r\n######################################################Data##############################################################\r\n\r\ndf = pd.read_csv('data/table.csv')\r\n\r\nEurope = ['Albania','Armenia','Austria','Azerbaijan','Belarus','Belgium','Bulgaria','Croatia','Cyprus',\r\n 'Czech Republic','Denmark','Estonia','Finland','France','Georgia','Germany','Greece','Hungary',\r\n 'Iceland','Ireland','Italy','Kazakhstan','Latvia','Lithuania','Luxembourg','Malta','Moldova',\r\n 'Netherlands','Norway','Poland','Portugal','Romania','Russian Federation','Slovak Republic',\r\n 'Slovenia','Spain','Sweden','Switzerland','Turkey','Ukraine','United Kingdom']\r\n\r\ncolor_array = [\"#999999\", \"#E69F00\", \"#56B4E9\", \"#009E73\", \"#F0E442\", \"#0072B2\", \"#D55E00\", \"#CC79A7\", '#9a6a00',\r\n '#0047e6','#00523b', '#893c00']\r\n\r\n\r\n######################################################Interactive Components############################################\r\n\r\ncountry_options = [dict(label=country, value=country) for country in Europe]\r\n\r\ncontinent_options = [dict(label=continent, value=continent) for continent in df['Continent_Name'].unique()]\r\n\r\n\r\n##################################################APP###############################################################\r\n\r\napp = dash.Dash(__name__)\r\nserver=app.server\r\napp.layout = html.Div([\r\n\r\n html.Div([\r\n html.H1(\r\n \"European Tourism\",\r\n style={\"margin-bottom\": \"0px\"},\r\n ),\r\n\r\n html.H3(\r\n \"An Overview\", style={\"margin-top\": \"0px\"}\r\n )\r\n ], className='Title'),\r\n\r\n html.Div([\r\n\r\n html.Div([\r\n html.Label('Country Choice'),\r\n dcc.Dropdown(\r\n id='country_drop',\r\n options=country_options,\r\n value=['Portugal', 'France'],\r\n multi=True\r\n ),\r\n\r\n html.Br(),\r\n\r\n html.Label('Continent Choice'),\r\n dcc.Dropdown(\r\n id='continent_drop',\r\n options=continent_options,\r\n value=['Europe','World'],\r\n multi=True\r\n ),\r\n\r\n html.Br(),\r\n\r\n html.Label('Year Slider'),\r\n dcc.Slider(\r\n id='year_slider',\r\n min=df['Years'].min(),\r\n max=2016,\r\n marks={str(i): '{}'.format(str(i)) for i in [2000, 2002, 2004, 2006, 2008, 2010, 2012, 2014, 2016]},\r\n value=2016,\r\n step=1,\r\n included=False\r\n ),\r\n\r\n html.Br(),\r\n\r\n dcc.Markdown(\"With this dashboard we wish to tell a story about European \"\r\n \"tourism. 
We showcase the prominence of tourism on countries' GDP, \"\r\n \"Europe's tourism revenue compared to other continents, the number of \"\r\n \"arrivals and how it's been growing over the years, how each tourism variable \"\r\n \"relates to each other and finally the influence of tourism on jobs.\"\r\n ),\r\n ], className='column2 pretty'),\r\n\r\n html.Div([dcc.Graph(id='bubbles_graph')], className='column1 pretty')\r\n ], className='row'),\r\n\r\n html.Div([\r\n\r\n html.Div([dcc.Graph(id='line_graph')], className='column3 pretty'),\r\n\r\n html.Div([dcc.Graph(id='choropleth')], className='column4 pretty')\r\n\r\n ], className='row'),\r\n\r\n html.Div([\r\n\r\n html.Div([dcc.Graph(id='radar_graph')], className='column3 pretty'),\r\n\r\n html.Div([dcc.Graph(id='subplot_graph')], className='column4 pretty')\r\n\r\n ], className='row'),\r\n\r\n html.Div([\r\n html.H6(\r\n \"Work by: Andreia Antunes [M20190876], Fernanda Zippinotti [M20190232], Lara Neves [20190867]\", style={\"margin-top\": \"0px\"}\r\n )\r\n ], className='Title')\r\n\r\n])\r\n\r\n######################################################Callbacks#########################################################\r\n\r\n@app.callback(\r\n [\r\n Output(\"choropleth\", \"figure\"),\r\n Output(\"line_graph\", \"figure\"),\r\n Output(\"bubbles_graph\", \"figure\"),\r\n Output(\"radar_graph\", \"figure\"),\r\n Output(\"subplot_graph\", \"figure\")\r\n ],\r\n [\r\n Input(\"year_slider\", \"value\"),\r\n Input(\"country_drop\", \"value\"),\r\n Input(\"continent_drop\", \"value\")\r\n ]\r\n)\r\ndef plots(year, countries, continent):\r\n\r\n #############################################First Choropleth######################################################\r\n df_EU = df.loc[df['Country_Name'].isin(Europe)]\r\n df_EU_0 = df_EU.loc[df_EU['Years']== year]\r\n data_choropleth = dict(type='choropleth',\r\n locations=df_EU_0['Country_Name'],\r\n locationmode='country names',\r\n text=df_EU_0['Country_Name'],\r\n colorscale='YlGnBu',\r\n colorbar=dict(title='Number of Arrivals'),\r\n #hovertemplate='Country: %{text}
' + str(gas.replace('_', ' ')) + ': %{z}',\r\n z=df_EU_0['Arrivals'])\r\n\r\n layout_choropleth = dict(geo=dict(scope='europe',\r\n projection={'type': 'equirectangular'},\r\n bgcolor='#f9f9f9',\r\n showframe = False\r\n ),\r\n title=dict(text='Number of Overnight Arrivals',\r\n x=.5,\r\n # Title relative position according to the xaxis, range (0,1)\r\n ),\r\n font=dict(size=12,color=\"#4d4d4d\"),\r\n paper_bgcolor='#f9f9f9')\r\n\r\n ############################################Second Lines Plot######################################################\r\n dataContinents = df[df.Country_Name.isna()]\r\n color_numb2 = 0\r\n data_line = []\r\n for country in continent:\r\n data_line.append( dict(type = 'scatter',\r\n x = dataContinents.loc[dataContinents['Continent_Name'] == country]['Years'],\r\n y = dataContinents.loc[dataContinents['Continent_Name'] == country]['Receipts_PCapita'],\r\n name = country,\r\n line_color= color_array[color_numb2]))\r\n color_numb2 += 1\r\n\r\n layout_line = dict(title = dict(text = 'Tourism Revenue per capita',x=0.5),\r\n xaxis = dict(title = 'Year'),\r\n yaxis = dict(title = 'Tourism Revenue per capita'),\r\n paper_bgcolor = '#f9f9f9',\r\n template='none',\r\n font = dict(size=12,color=\"#4d4d4d\"),\r\n legend = dict(orientation='h',yanchor='top',xanchor='center',y=-0.3,x=0.5))\r\n\r\n ############################################Third Bubbles Plot#####################################################\r\n dataBubble = df.dropna()\r\n dataBubble.sort_values(by ='Years', inplace = True)\r\n data_bubble = px.scatter(dataBubble.loc[dataBubble['Country_Name'].isin(countries)], x=\"GDP\", y=\"Receipts_PCapita\",\r\n animation_frame=\"Years\", animation_group=\"Country_Name\",\r\n size=\"Ratio GDP\", hover_name=\"Country_Name\", color=\"Country_Name\",\r\n log_x=True, size_max=40, range_x=[300, 120000], range_y=[0, 11000])\r\n\r\n layout_bubble = data_bubble.update_layout(title=dict(text='Tourism and GDP per capita', x=0.5),\r\n xaxis=dict(title='GDP per capita'),\r\n yaxis=dict(title='Tourism GDP per capita'),\r\n paper_bgcolor='#f9f9f9',\r\n font=dict(size=12,color=\"#4d4d4d\"),\r\n template='none'\r\n\r\n )\r\n\r\n data_bubble.for_each_trace(lambda t: t.update(name=t.name.replace(\"Country_Name=\", \"\")))\r\n ############################################Fourth Radar Plot#####################################################\r\n\r\n labels = ['GDP_N', 'Expenditures_N', 'PopTotal_N', 'Arrivals_N', 'Departure_N', 'GDP_N']\r\n data_radar =[]\r\n\r\n color_numb = 0\r\n for country in countries:\r\n dataradar = df[['GDP_N', 'Expenditures_N', 'PopTotal_N', 'Arrivals_N', 'Departure_N']].loc[\r\n (df['Years'] == year) & (df['Country_Name'] == country)]\r\n values = dataradar.values.flatten().tolist()\r\n values += values[:1]\r\n data_radar.append(dict(type='scatterpolar',\r\n r=values,\r\n theta=labels,\r\n fill='toself',\r\n name=country,\r\n line_color= color_array[color_numb],\r\n mode='lines'\r\n ))\r\n color_numb += 1\r\n\r\n layout_radar = dict(\r\n title='Tourism and related metrics',\r\n font=dict(\r\n # family = 'Arial, sans-serif;',\r\n size=12,\r\n color=\"#4d4d4d\"\r\n ),\r\n title_x=0.5,\r\n polar=dict(\r\n radialaxis=dict(\r\n visible=True,\r\n range=[-3, 3]\r\n )),\r\n paper_bgcolor='#f9f9f9',\r\n template = 'none',\r\n showlegend=True\r\n )\r\n\r\n ############################################Fifth Bar Plot##########################################################\r\n titles = ['Jobs per 1k Tourists', 'Expenditure required for one Job']\r\n plot = 
make_subplots(rows=1,\r\n cols=2,\r\n subplot_titles=titles,\r\n specs= [[{}, {}]], shared_xaxes = True,\r\n shared_yaxes=False, vertical_spacing=0.001\r\n )\r\n\r\n\r\n ############################################Fifth Subplot Plot##########################################################\r\n data_bar = []\r\n for country in countries:\r\n df_bar = df.loc[df['Country_Name'] == country]\r\n\r\n x_bar = df_bar['Country_Name']\r\n y_bar = (df_bar.loc[df_bar['Years'] == year]['Jobs_per_tourist'])\r\n plot.append_trace(go.Bar(\r\n x=y_bar,\r\n y=x_bar,\r\n marker=dict(\r\n color='rgba(50, 171, 96, 0.6)',\r\n line=dict(\r\n color='rgba(50, 171, 96, 1.0)',\r\n width=1),\r\n ),\r\n orientation='h',\r\n showlegend= False\r\n ), 1, 1)\r\n\r\n for country in countries:\r\n df_markers = df.loc[(df['Country_Name'] == country)]\r\n\r\n plot.append_trace(go.Bar(\r\n x=df_markers.loc[df_markers['Years']==year]['Cost_of_oneJob'],\r\n y=df_markers['Country_Name'],\r\n marker=dict(\r\n color='rgba(50, 171, 96, 0.6)',\r\n line=dict(\r\n color='rgba(50, 171, 96, 1.0)',\r\n width=1),\r\n ),\r\n orientation='h',\r\n showlegend=False\r\n ), 1, 2)\r\n\r\n\r\n plot.update_layout(\r\n title='Tourism Impact on Jobs',\r\n yaxis=dict(\r\n showgrid=False,\r\n showline=False,\r\n showticklabels=True,\r\n domain=[0, 0.85],\r\n ),\r\n yaxis2=dict(\r\n showgrid=False,\r\n showline=True,\r\n showticklabels=False,\r\n linecolor='rgba(102, 102, 102, 0.8)',\r\n linewidth=2,\r\n domain=[0, 0.85],\r\n ),\r\n xaxis=dict(\r\n zeroline=False,\r\n showline=False,\r\n showticklabels=True,\r\n showgrid=True,\r\n domain=[0, 0.42],\r\n ),\r\n xaxis2=dict(\r\n zeroline=False,\r\n showline=False,\r\n showticklabels=True,\r\n showgrid=True,\r\n domain=[0.47, 1],\r\n side='top',\r\n dtick=25000,\r\n ),\r\n legend=dict(x=0.029, y=1.038, font_size=10),\r\n margin=dict(l=100, r=20, t=70, b=70),\r\n paper_bgcolor='#f9f9f9',\r\n font=dict(size=12, color=\"#4d4d4d\"),\r\n title_x=0.5,\r\n plot_bgcolor='rgb(248, 248, 255)',\r\n )\r\n\r\n return go.Figure(data=data_choropleth, layout=layout_choropleth), \\\r\n go.Figure(data=data_line, layout=layout_line), \\\r\n go.Figure(data=data_bubble, layout=layout_bubble), \\\r\n go.Figure(data=data_radar, layout=layout_radar), \\\r\n plot\r\n\r\n\r\nif __name__ == '__main__':\r\n app.run_server(debug=True)\r\n\r\n", "repo_name": "andreiantunes/dv_test", "sub_path": "app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 13146, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "73", "api": [{"api_name": "pandas.read_csv", "line_number": 14, "usage_type": "call"}, {"api_name": "dash.Dash", "line_number": 35, "usage_type": "call"}, {"api_name": "dash_html_components.Div", "line_number": 37, "usage_type": "call"}, {"api_name": "dash_html_components.Div", "line_number": 39, "usage_type": "call"}, {"api_name": "dash_html_components.H1", "line_number": 40, "usage_type": "call"}, {"api_name": "dash_html_components.H3", "line_number": 45, "usage_type": "call"}, {"api_name": "dash_html_components.Div", "line_number": 50, "usage_type": "call"}, {"api_name": "dash_html_components.Div", "line_number": 52, "usage_type": "call"}, {"api_name": "dash_html_components.Label", "line_number": 53, "usage_type": "call"}, {"api_name": "dash_core_components.Dropdown", "line_number": 54, "usage_type": "call"}, {"api_name": "dash_html_components.Br", "line_number": 61, "usage_type": "call"}, {"api_name": "dash_html_components.Label", "line_number": 63, 
"usage_type": "call"}, {"api_name": "dash_core_components.Dropdown", "line_number": 64, "usage_type": "call"}, {"api_name": "dash_html_components.Br", "line_number": 71, "usage_type": "call"}, {"api_name": "dash_html_components.Label", "line_number": 73, "usage_type": "call"}, {"api_name": "dash_core_components.Slider", "line_number": 74, "usage_type": "call"}, {"api_name": "dash_html_components.Br", "line_number": 84, "usage_type": "call"}, {"api_name": "dash_core_components.Markdown", "line_number": 86, "usage_type": "call"}, {"api_name": "dash_html_components.Div", "line_number": 94, "usage_type": "call"}, {"api_name": "dash_core_components.Graph", "line_number": 94, "usage_type": "call"}, {"api_name": "dash_html_components.Div", "line_number": 97, "usage_type": "call"}, {"api_name": "dash_html_components.Div", "line_number": 99, "usage_type": "call"}, {"api_name": "dash_core_components.Graph", "line_number": 99, "usage_type": "call"}, {"api_name": "dash_html_components.Div", "line_number": 101, "usage_type": "call"}, {"api_name": "dash_core_components.Graph", "line_number": 101, "usage_type": "call"}, {"api_name": "dash_html_components.Div", "line_number": 105, "usage_type": "call"}, {"api_name": "dash_html_components.Div", "line_number": 107, "usage_type": "call"}, {"api_name": "dash_core_components.Graph", "line_number": 107, "usage_type": "call"}, {"api_name": "dash_html_components.Div", "line_number": 109, "usage_type": "call"}, {"api_name": "dash_core_components.Graph", "line_number": 109, "usage_type": "call"}, {"api_name": "dash_html_components.Div", "line_number": 113, "usage_type": "call"}, {"api_name": "dash_html_components.H6", "line_number": 114, "usage_type": "call"}, {"api_name": "plotly.express.scatter", "line_number": 186, "usage_type": "call"}, {"api_name": "plotly.express", "line_number": 186, "usage_type": "name"}, {"api_name": "plotly.subplots.make_subplots", "line_number": 242, "usage_type": "call"}, {"api_name": "plotly.graph_objs.Bar", "line_number": 257, "usage_type": "call"}, {"api_name": "plotly.graph_objs", "line_number": 257, "usage_type": "name"}, {"api_name": "plotly.graph_objs.Bar", "line_number": 273, "usage_type": "call"}, {"api_name": "plotly.graph_objs", "line_number": 273, "usage_type": "name"}, {"api_name": "plotly.graph_objs.Figure", "line_number": 327, "usage_type": "call"}, {"api_name": "plotly.graph_objs", "line_number": 327, "usage_type": "name"}, {"api_name": "plotly.graph_objs.Figure", "line_number": 328, "usage_type": "call"}, {"api_name": "plotly.graph_objs", "line_number": 328, "usage_type": "name"}, {"api_name": "plotly.graph_objs.Figure", "line_number": 329, "usage_type": "call"}, {"api_name": "plotly.graph_objs", "line_number": 329, "usage_type": "name"}, {"api_name": "plotly.graph_objs.Figure", "line_number": 330, "usage_type": "call"}, {"api_name": "plotly.graph_objs", "line_number": 330, "usage_type": "name"}, {"api_name": "dash.dependencies.Output", "line_number": 125, "usage_type": "call"}, {"api_name": "dash.dependencies.Output", "line_number": 126, "usage_type": "call"}, {"api_name": "dash.dependencies.Output", "line_number": 127, "usage_type": "call"}, {"api_name": "dash.dependencies.Output", "line_number": 128, "usage_type": "call"}, {"api_name": "dash.dependencies.Output", "line_number": 129, "usage_type": "call"}, {"api_name": "dash.dependencies.Input", "line_number": 132, "usage_type": "call"}, {"api_name": "dash.dependencies.Input", "line_number": 133, "usage_type": "call"}, {"api_name": "dash.dependencies.Input", 
"line_number": 134, "usage_type": "call"}]}
+{"seq_id": "69825853355", "text": "# -*- coding: utf-8 -*-\n\n\"\"\"\nSupport for the Twitter v2 API.\n\"\"\"\n\nimport re\nimport json\nimport time\nimport logging\nimport datetime\nimport requests\n\nfrom oauthlib.oauth2 import BackendApplicationClient\nfrom requests_oauthlib import OAuth1Session, OAuth2Session\n\nfrom twarc import expansions\nfrom twarc.decorators2 import *\nfrom twarc.version import version\n\n\nlog = logging.getLogger(\"twarc\")\n\n\nclass Twarc2:\n \"\"\"\n A client for the Twitter v2 API.\n \"\"\"\n\n def __init__(\n self,\n consumer_key=None,\n consumer_secret=None,\n access_token=None,\n access_token_secret=None,\n bearer_token=None,\n connection_errors=0,\n metadata=True,\n ):\n \"\"\"\n Instantiate a Twarc2 instance to talk to the Twitter V2+ API.\n\n The client can use either App or User authentication, but only one at a\n time. Whether app auth or user auth is used depends on which credentials\n are provided on initialisation:\n\n 1. If a `bearer_token` is passed, app auth is always used.\n 2. If a `consumer_key` and `consumer_secret` are passed without an\n `access_token` and `access_token_secret`, app auth is used.\n 3. If `consumer_key`, `consumer_secret`, `access_token` and\n `access_token_secret` are all passed, then user authentication\n is used instead.\n\n Args:\n consumer_key (str):\n The API key.\n consumer_secret (str):\n The API secret.\n access_token (str):\n The Access Token\n access_token_secret (str):\n The Access Token Secret\n bearer_token (str):\n Bearer Token, can be generated from API keys.\n connection_errors (int):\n Number of retries for GETs\n metadata (bool):\n Append `__twarc` metadata to results.\n \"\"\"\n self.api_version = \"2\"\n self.connection_errors = connection_errors\n self.metadata = metadata\n self.bearer_token = None\n\n if bearer_token:\n self.bearer_token = bearer_token\n self.auth_type = \"application\"\n\n elif consumer_key and consumer_secret:\n if access_token and access_token_secret:\n self.consumer_key = consumer_key\n self.consumer_secret = consumer_secret\n self.access_token = access_token\n self.access_token_secret = access_token_secret\n self.auth_type = \"user\"\n\n else:\n self.consumer_key = consumer_key\n self.consumer_secret = consumer_secret\n self.auth_type = \"application\"\n\n else:\n raise ValueError(\n \"Must pass either a bearer_token or consumer/access_token keys and secrets\"\n )\n\n self.client = None\n self.last_response = None\n\n self.connect()\n\n def _search(\n self,\n url,\n query,\n since_id,\n until_id,\n start_time,\n end_time,\n max_results,\n granularity=None,\n sleep_between=0,\n ):\n if granularity:\n params = {}\n params[\"granularity\"] = granularity\n else:\n params = expansions.EVERYTHING.copy()\n\n params[\"query\"] = query\n\n if max_results:\n params[\"max_results\"] = max_results\n if since_id:\n params[\"since_id\"] = since_id\n if until_id:\n params[\"until_id\"] = until_id\n if start_time:\n params[\"start_time\"] = _ts(start_time)\n if end_time:\n params[\"end_time\"] = _ts(end_time)\n\n count = 0\n made_call = time.monotonic()\n\n for response in self.get_paginated(url, params=params):\n # can't return without 'data' if there are no results\n if \"data\" in response:\n count += len(response[\"data\"])\n yield response\n\n else:\n log.info(f\"Retrieved an empty page of results.\")\n\n # Calculate the amount of time to sleep, accounting for any\n # processing time used by the rest of the application.\n # This is to satisfy the 1 request / 1 second rate limit\n # on the 
search/all endpoint.\n time.sleep(max(0, sleep_between - (time.monotonic() - made_call)))\n made_call = time.monotonic()\n\n log.info(f\"No more results for search {query}.\")\n\n def search_recent(\n self,\n query,\n since_id=None,\n until_id=None,\n start_time=None,\n end_time=None,\n max_results=100,\n ):\n \"\"\"\n Search Twitter for the given query in the last seven days,\n using the `/search/recent` endpoint.\n\n Calls [GET /2/tweets/search/recent](https://developer.twitter.com/en/docs/twitter-api/tweets/search/api-reference/get-tweets-search-recent)\n\n Args:\n query (str):\n The query string to be passed directly to the Twitter API.\n since_id (int):\n Return all tweets since this tweet_id.\n until_id (int):\n Return all tweets up to this tweet_id.\n start_time (datetime):\n Return all tweets after this time (UTC datetime).\n end_time (datetime):\n Return all tweets before this time (UTC datetime).\n max_results (int):\n The maximum number of results per request. Max is 100.\n\n Returns:\n generator[dict]: a generator, dict for each paginated response.\n \"\"\"\n url = \"https://api.twitter.com/2/tweets/search/recent\"\n return self._search(\n url, query, since_id, until_id, start_time, end_time, max_results\n )\n\n @requires_app_auth\n def search_all(\n self,\n query,\n since_id=None,\n until_id=None,\n start_time=None,\n end_time=None,\n max_results=100, # temp fix for #504\n ):\n \"\"\"\n Search Twitter for the given query in the full archive,\n using the `/search/all` endpoint (Requires Academic Access).\n\n Calls [GET /2/tweets/search/all](https://developer.twitter.com/en/docs/twitter-api/tweets/search/api-reference/get-tweets-search-all)\n\n Args:\n query (str):\n The query string to be passed directly to the Twitter API.\n since_id (int):\n Return all tweets since this tweet_id.\n until_id (int):\n Return all tweets up to this tweet_id.\n start_time (datetime):\n Return all tweets after this time (UTC datetime). If none of start_time, since_id, or until_id\n are specified, this defaults to 2006-3-21 to search the entire history of Twitter.\n end_time (datetime):\n Return all tweets before this time (UTC datetime).\n max_results (int):\n The maximum number of results per request. Max is 500.\n\n Returns:\n generator[dict]: a generator, dict for each paginated response.\n \"\"\"\n url = \"https://api.twitter.com/2/tweets/search/all\"\n\n # start time defaults to the beginning of Twitter to override the\n # default of the last month. 
Only do this if start_time is not already\n # specified and since_id and until_id aren't being used\n if start_time is None and since_id is None and until_id is None:\n start_time = datetime.datetime(2006, 3, 21, tzinfo=datetime.timezone.utc)\n\n return self._search(\n url,\n query,\n since_id,\n until_id,\n start_time,\n end_time,\n max_results,\n sleep_between=1.05,\n )\n\n @requires_app_auth\n def counts_recent(\n self,\n query,\n since_id=None,\n until_id=None,\n start_time=None,\n end_time=None,\n granularity=\"hour\",\n ):\n \"\"\"\n Retrieve counts for the given query in the last seven days,\n using the `/counts/recent` endpoint.\n\n Calls [GET /2/tweets/counts/recent]()\n\n Args:\n query (str):\n The query string to be passed directly to the Twitter API.\n since_id (int):\n Return all tweets since this tweet_id.\n until_id (int):\n Return all tweets up to this tweet_id.\n start_time (datetime):\n Return all tweets after this time (UTC datetime).\n end_time (datetime):\n Return all tweets before this time (UTC datetime).\n granularity (str):\n Count aggregation level: `day`, `hour`, `minute`.\n Default is `hour`.\n\n Returns:\n generator[dict]: a generator, dict for each paginated response.\n \"\"\"\n url = \"https://api.twitter.com/2/tweets/counts/recent\"\n return self._search(\n url, query, since_id, until_id, start_time, end_time, None, granularity\n )\n\n @requires_app_auth\n def counts_all(\n self,\n query,\n since_id=None,\n until_id=None,\n start_time=None,\n end_time=None,\n granularity=\"hour\",\n ):\n \"\"\"\n Retrieve counts for the given query in the full archive,\n using the `/search/all` endpoint (Requires Academic Access).\n\n Calls [GET /2/tweets/counts/all]()\n\n Args:\n query (str):\n The query string to be passed directly to the Twitter API.\n since_id (int):\n Return all tweets since this tweet_id.\n until_id (int):\n Return all tweets up to this tweet_id.\n start_time (datetime):\n Return all tweets after this time (UTC datetime).\n end_time (datetime):\n Return all tweets before this time (UTC datetime).\n granularity (str):\n Count aggregation level: `day`, `hour`, `minute`.\n Default is `hour`.\n\n Returns:\n generator[dict]: a generator, dict for each paginated response.\n \"\"\"\n url = \"https://api.twitter.com/2/tweets/counts/all\"\n\n return self._search(\n url,\n query,\n since_id,\n until_id,\n start_time,\n end_time,\n None,\n granularity,\n sleep_between=1.05,\n )\n\n def tweet_lookup(self, tweet_ids):\n \"\"\"\n Lookup tweets, taking an iterator of IDs and returning pages of fully\n expanded tweet objects.\n\n This can be used to rehydrate a collection shared as only tweet IDs.\n Yields one page of tweets at a time, in blocks of up to 100.\n\n Calls [GET /2/tweets](https://developer.twitter.com/en/docs/twitter-api/tweets/lookup/api-reference/get-tweets)\n\n Args:\n tweet_ids (iterable): A list of tweet IDs\n\n Returns:\n generator[dict]: a generator, dict for each batch of 100 tweets.\n \"\"\"\n\n def lookup_batch(tweet_id):\n\n url = \"https://api.twitter.com/2/tweets\"\n\n params = expansions.EVERYTHING.copy()\n params[\"ids\"] = \",\".join(tweet_id)\n\n resp = self.get(url, params=params)\n data = resp.json()\n\n if self.metadata:\n data = _append_metadata(data, resp.url)\n\n return data\n\n tweet_id_batch = []\n\n for tweet_id in tweet_ids:\n tweet_id_batch.append(str(int(tweet_id)))\n\n if len(tweet_id_batch) == 100:\n yield lookup_batch(tweet_id_batch)\n tweet_id_batch = []\n\n if tweet_id_batch:\n yield (lookup_batch(tweet_id_batch))\n\n def 
user_lookup(self, users, usernames=False):\n \"\"\"\n Returns fully populated user profiles for the given iterator of\n user_id or usernames. By default user_lookup expects user ids but if\n you want to pass in usernames set usernames = True.\n\n Yields one page of results at a time (in blocks of at most 100 user\n profiles).\n\n Calls [GET /2/users](https://developer.twitter.com/en/docs/twitter-api/users/lookup/api-reference/get-users)\n\n Args:\n users (iterable): User IDs or usernames to lookup.\n usernames (bool): Parse `users` as usernames, not IDs.\n\n Returns:\n generator[dict]: a generator, dict for each batch of 100 users.\n \"\"\"\n\n if usernames:\n url = \"https://api.twitter.com/2/users/by\"\n else:\n url = \"https://api.twitter.com/2/users\"\n\n def lookup_batch(users):\n params = expansions.USER_EVERYTHING.copy()\n if usernames:\n params[\"usernames\"] = \",\".join(users)\n else:\n params[\"ids\"] = \",\".join(users)\n\n resp = self.get(url, params=params)\n data = resp.json()\n\n if self.metadata:\n data = _append_metadata(data, resp.url)\n\n return data\n\n batch = []\n for item in users:\n batch.append(str(item).strip())\n if len(batch) == 100:\n yield lookup_batch(batch)\n batch = []\n\n if batch:\n yield (lookup_batch(batch))\n\n @catch_request_exceptions\n @requires_app_auth\n def sample(self, event=None, record_keepalive=False):\n \"\"\"\n Returns a sample of all publicly posted tweets.\n\n The sample is based on slices of each second, not truly randomised. The\n same tweets are returned for all users of this endpoint.\n\n If a `threading.Event` is provided and the event is set, the\n sample will be interrupted. This can be used for coordination with other\n programs.\n\n Calls [GET /2/tweets/sample/stream](https://developer.twitter.com/en/docs/twitter-api/tweets/sampled-stream/api-reference/get-tweets-sample-stream)\n\n Args:\n event (threading.Event): Manages a flag to stop the process.\n record_keepalive (bool): whether to output keep-alive events.\n\n Returns:\n generator[dict]: a generator, dict for each tweet.\n \"\"\"\n url = \"https://api.twitter.com/2/tweets/sample/stream\"\n params = expansions.EVERYTHING.copy()\n yield from self._stream(url, params, event, record_keepalive)\n\n @requires_app_auth\n def add_stream_rules(self, rules):\n \"\"\"\n Adds new rules to the filter stream.\n\n Calls [POST /2/tweets/search/stream/rules](https://developer.twitter.com/en/docs/twitter-api/tweets/filtered-stream/api-reference/post-tweets-search-stream-rules)\n\n Args:\n rules (list[dict]): A list of rules to add.\n\n Returns:\n dict: JSON Response from Twitter API.\n \"\"\"\n url = \"https://api.twitter.com/2/tweets/search/stream/rules\"\n return self.post(url, {\"add\": rules}).json()\n\n @requires_app_auth\n def get_stream_rules(self):\n \"\"\"\n Returns a list of rules for the filter stream.\n\n Calls [GET /2/tweets/search/stream/rules](https://developer.twitter.com/en/docs/twitter-api/tweets/filtered-stream/api-reference/get-tweets-search-stream-rules)\n\n Returns:\n dict: JSON Response from Twitter API with a list of defined rules.\n \"\"\"\n url = \"https://api.twitter.com/2/tweets/search/stream/rules\"\n return self.get(url).json()\n\n @requires_app_auth\n def delete_stream_rule_ids(self, rule_ids):\n \"\"\"\n Deletes rules from the filter stream.\n\n Calls [POST /2/tweets/search/stream/rules](https://developer.twitter.com/en/docs/twitter-api/tweets/filtered-stream/api-reference/post-tweets-search-stream-rules)\n\n Args:\n rule_ids (list[int]): A list of rule ids 
to delete.\n\n Returns:\n dict: JSON Response from Twitter API.\n \"\"\"\n url = \"https://api.twitter.com/2/tweets/search/stream/rules\"\n return self.post(url, {\"delete\": {\"ids\": rule_ids}}).json()\n\n @requires_app_auth\n def stream(self, event=None, record_keepalive=False):\n \"\"\"\n Returns a stream of tweets matching the defined rules.\n\n Rules can be added or removed out-of-band, without disconnecting.\n Tweet results will contain metadata about the rule that matched it.\n\n If event is set with a threading.Event object, the sample stream\n will be interrupted. This can be used for coordination with other\n programs.\n\n Calls [GET /2/tweets/search/stream](https://developer.twitter.com/en/docs/twitter-api/tweets/filtered-stream/api-reference/get-tweets-search-stream)\n\n Args:\n event (threading.Event): Manages a flag to stop the process.\n record_keepalive (bool): whether to output keep-alive events.\n\n Returns:\n generator[dict]: a generator, dict for each tweet.\n \"\"\"\n url = \"https://api.twitter.com/2/tweets/search/stream\"\n params = expansions.EVERYTHING.copy()\n yield from self._stream(url, params, event, record_keepalive)\n\n def _stream(self, url, params, event, record_keepalive, tries=30):\n \"\"\"\n A generator that handles streaming data from a response and catches and\n logs any request exceptions, sleeps (exponential backoff) and restarts\n the stream.\n\n Args:\n url (str): the streaming endpoint URL\n params (dict): any query parameters to use with the url\n event (threading.Event): Manages a flag to stop the process.\n record_keepalive (bool): whether to output keep-alive events.\n tries (int): the number of times to retry connecting after an error\n Returns:\n generator[dict]: A generator of tweet dicts.\n \"\"\"\n errors = 0\n while True:\n log.info(f\"connecting to stream {url}\")\n resp = self.get(url, params=params, stream=True)\n\n try:\n for line in resp.iter_lines():\n errors = 0\n\n # quit & close the stream if the event is set\n if event and event.is_set():\n log.info(\"stopping response stream\")\n resp.close()\n return\n\n # return the JSON data w/ optional keep-alive\n if not line:\n log.info(\"keep-alive\")\n if record_keepalive:\n yield \"keep-alive\"\n continue\n else:\n data = json.loads(line.decode())\n if self.metadata:\n data = _append_metadata(data, resp.url)\n yield data\n if self._check_for_disconnect(data):\n break\n\n except requests.exceptions.RequestException as e:\n log.warn(\"caught exception during streaming: %s\", e)\n errors += 1\n if errors > tries:\n log.error(f\"too many consecutive errors ({tries}). 
stopping\")\n return\n else:\n secs = errors ** 2\n log.info(\"sleeping %s seconds before reconnecting\", secs)\n time.sleep(secs)\n\n def _timeline(\n self,\n user_id,\n timeline_type,\n since_id,\n until_id,\n start_time,\n end_time,\n exclude_retweets,\n exclude_replies,\n ):\n \"\"\"\n Helper function for user and mention timelines\n\n Calls [GET /2/users/:id/tweets](https://developer.twitter.com/en/docs/twitter-api/tweets/timelines/api-reference/get-users-id-tweets)\n or [GET /2/users/:id/mentions](https://developer.twitter.com/en/docs/twitter-api/tweets/timelines/api-reference/get-users-id-mentions)\n\n Args:\n user_id (int): ID of the user.\n timeline_type (str): timeline type: `tweets` or `mentions`\n since_id (int): results with a Tweet ID greater than (newer) than specified\n until_id (int): results with a Tweet ID less than (older) than specified\n start_time (datetime): oldest UTC timestamp from which the Tweets will be provided\n end_time (datetime): newest UTC timestamp from which the Tweets will be provided\n exclude_retweets (boolean): remove retweets from timeline\n exlucde_replies (boolean): remove replies from timeline\n Returns:\n generator[dict]: A generator, dict for each page of results.\n \"\"\"\n\n url = f\"https://api.twitter.com/2/users/{user_id}/{timeline_type}\"\n\n params = expansions.EVERYTHING.copy()\n params[\"max_results\"] = 100\n\n excludes = []\n if exclude_retweets:\n excludes.append(\"retweets\")\n if exclude_replies:\n excludes.append(\"replies\")\n\n if since_id:\n params[\"since_id\"] = since_id\n if until_id:\n params[\"until_id\"] = until_id\n if start_time:\n params[\"start_time\"] = _ts(start_time)\n if end_time:\n params[\"end_time\"] = _ts(end_time)\n if len(excludes) > 0:\n params[\"exclude\"] = \",\".join(excludes)\n\n count = 0\n for response in self.get_paginated(url, params=params):\n # can return without 'data' if there are no results\n if \"data\" in response:\n count += len(response[\"data\"])\n yield response\n else:\n log.info(f\"Retrieved an empty page of results for timeline {user_id}\")\n\n log.info(f\"No more results for timeline {user_id}.\")\n\n def timeline(\n self,\n user,\n since_id=None,\n until_id=None,\n start_time=None,\n end_time=None,\n exclude_retweets=False,\n exclude_replies=False,\n ):\n \"\"\"\n Retrieve up to the 3200 most recent tweets made by the given user.\n\n Calls [GET /2/users/:id/tweets](https://developer.twitter.com/en/docs/twitter-api/tweets/timelines/api-reference/get-users-id-tweets)\n\n Args:\n user (int): ID of the user.\n since_id (int): results with a Tweet ID greater than (newer) than specified\n until_id (int): results with a Tweet ID less than (older) than specified\n start_time (datetime): oldest UTC timestamp from which the Tweets will be provided\n end_time (datetime): newest UTC timestamp from which the Tweets will be provided\n exclude_retweets (boolean): remove retweets from timeline results\n exclude_replies (boolean): remove replies from timeline results\n\n Returns:\n generator[dict]: A generator, dict for each page of results.\n \"\"\"\n user_id = self._ensure_user_id(user)\n return self._timeline(\n user_id,\n \"tweets\",\n since_id,\n until_id,\n start_time,\n end_time,\n exclude_retweets,\n exclude_replies,\n )\n\n def mentions(\n self,\n user,\n since_id=None,\n until_id=None,\n start_time=None,\n end_time=None,\n exclude_retweets=False,\n exclude_replies=False,\n ):\n \"\"\"\n Retrieve up to the 800 most recent tweets mentioning the given user.\n\n Calls [GET 
/2/users/:id/mentions](https://developer.twitter.com/en/docs/twitter-api/tweets/timelines/api-reference/get-users-id-mentions)\n\n Args:\n user (int): ID of the user.\n since_id (int): results with a Tweet ID greater than (newer) than specified\n until_id (int): results with a Tweet ID less than (older) than specified\n start_time (datetime): oldest UTC timestamp from which the Tweets will be provided\n end_time (datetime): newest UTC timestamp from which the Tweets will be provided\n exclude_retweets (boolean): remove retweets from timeline results\n exclude_replies (boolean): remove replies from timeline results\n\n Returns:\n generator[dict]: A generator, dict for each page of results.\n \"\"\"\n user_id = self._ensure_user_id(user)\n return self._timeline(\n user_id,\n \"mentions\",\n since_id,\n until_id,\n start_time,\n end_time,\n exclude_retweets,\n exclude_replies,\n )\n\n def following(self, user, user_id=None):\n \"\"\"\n Retrieve the user profiles of accounts followed by the given user.\n\n Calls [GET /2/users/:id/following](https://developer.twitter.com/en/docs/twitter-api/users/follows/api-reference/get-users-id-following)\n\n Args:\n user (int): ID of the user.\n\n Returns:\n generator[dict]: A generator, dict for each page of results.\n \"\"\"\n user_id = self._ensure_user_id(user) if not user_id else user_id\n params = expansions.USER_EVERYTHING.copy()\n params[\"max_results\"] = 1000\n url = f\"https://api.twitter.com/2/users/{user_id}/following\"\n return self.get_paginated(url, params=params)\n\n def followers(self, user, user_id=None):\n \"\"\"\n Retrieve the user profiles of accounts following the given user.\n\n Calls [GET /2/users/:id/followers](https://developer.twitter.com/en/docs/twitter-api/users/follows/api-reference/get-users-id-followers)\n\n Args:\n user (int): ID of the user.\n\n Returns:\n generator[dict]: A generator, dict for each page of results.\n \"\"\"\n user_id = self._ensure_user_id(user) if not user_id else user_id\n params = expansions.USER_EVERYTHING.copy()\n params[\"max_results\"] = 1000\n url = f\"https://api.twitter.com/2/users/{user_id}/followers\"\n return self.get_paginated(url, params=params)\n\n @catch_request_exceptions\n @rate_limit\n def get(self, *args, **kwargs):\n \"\"\"\n Make a GET request to a specified URL.\n\n Args:\n *args: Variable length argument list.\n **kwargs: Arbitrary keyword arguments.\n\n Returns:\n requests.Response: Response from Twitter API.\n \"\"\"\n if not self.client:\n self.connect()\n log.info(\"getting %s %s\", args, kwargs)\n r = self.last_response = self.client.get(*args, timeout=(3.05, 31), **kwargs)\n return r\n\n def get_paginated(self, *args, **kwargs):\n \"\"\"\n A wrapper around the `get` method that handles Twitter token based\n pagination.\n\n Yields one page (one API response) at a time.\n\n Args:\n *args: Variable length argument list.\n **kwargs: Arbitrary keyword arguments.\n\n Returns:\n generator[dict]: A generator, dict for each page of results.\n \"\"\"\n\n resp = self.get(*args, **kwargs)\n page = resp.json()\n\n url = args[0]\n\n if self.metadata:\n page = _append_metadata(page, resp.url)\n\n yield page\n\n endings = [\"mentions\", \"tweets\", \"following\", \"followers\"]\n\n # The search endpoints only take a next_token, but the timeline\n # endpoints take a pagination_token instead - this is a bit of a hack,\n # but check the URL ending to see which we should use.\n if any(url.endswith(end) for end in endings):\n token_param = \"pagination_token\"\n else:\n token_param = 
\"next_token\"\n\n while \"meta\" in page and \"next_token\" in page[\"meta\"]:\n if \"params\" in kwargs:\n kwargs[\"params\"][token_param] = page[\"meta\"][\"next_token\"]\n else:\n kwargs[\"params\"] = {token_param: page[\"meta\"][\"next_token\"]}\n\n resp = self.get(*args, **kwargs)\n page = resp.json()\n\n if self.metadata:\n page = _append_metadata(page, resp.url)\n\n yield page\n\n @catch_request_exceptions\n @rate_limit\n def post(self, url, json_data):\n \"\"\"\n Make a POST request to the specified URL.\n\n Args:\n url (str): URL to make a POST request\n json_data (dict): JSON data to send.\n\n Returns:\n requests.Response: Response from Twitter API.\n \"\"\"\n if not self.client:\n self.connect()\n return self.client.post(url, json=json_data)\n\n def connect(self):\n \"\"\"\n Sets up the HTTP session to talk to Twitter. If one is active it is\n closed and another one is opened.\n \"\"\"\n if self.last_response:\n self.last_response.close()\n\n if self.client:\n self.client.close()\n\n if self.auth_type == \"application\" and self.bearer_token:\n log.info(\"creating HTTP session headers for app auth.\")\n auth = f\"Bearer {self.bearer_token}\"\n log.debug(\"authorization: %s\", auth)\n self.client = requests.Session()\n self.client.headers.update({\"Authorization\": auth})\n elif self.auth_type == \"application\":\n log.info(\"creating app auth client via OAuth2\")\n log.debug(\"client_id: %s\", self.consumer_key)\n log.debug(\"client_secret: %s\", self.consumer_secret)\n client = BackendApplicationClient(client_id=self.consumer_key)\n self.client = OAuth2Session(client=client)\n self.client.fetch_token(\n token_url=\"https://api.twitter.com/oauth2/token\",\n client_id=self.consumer_key,\n client_secret=self.consumer_secret,\n )\n else:\n log.info(\"creating user auth client\")\n log.debug(\"client_id: %s\", self.consumer_key)\n log.debug(\"client_secret: %s\", self.consumer_secret)\n log.debug(\"resource_owner_key: %s\", self.access_token)\n log.debug(\"resource_owner_secret: %s\", self.access_token_secret)\n self.client = OAuth1Session(\n client_key=self.consumer_key,\n client_secret=self.consumer_secret,\n resource_owner_key=self.access_token,\n resource_owner_secret=self.access_token_secret,\n )\n\n @requires_app_auth\n def compliance_job_list(self, job_type, status):\n \"\"\"\n Returns list of compliance jobs.\n\n Calls [GET /2/compliance/jobs](https://developer.twitter.com/en/docs/twitter-api/compliance/batch-compliance/api-reference/get-compliance-jobs)\n\n Args:\n job_type (str): Filter by job type - either tweets or users.\n status (str): Filter by job status. Only one of 'created', 'in_progress', 'complete', 'failed' can be specified. 
If not set, returns all.\n\n Returns:\n list[dict]: A list of jobs.\n \"\"\"\n params = {}\n if job_type:\n params[\"type\"] = job_type\n if status:\n params[\"status\"] = status\n result = self.client.get(\n \"https://api.twitter.com/2/compliance/jobs\", params=params\n ).json()\n if \"data\" in result or not result:\n return result\n else:\n raise ValueError(f\"Unknown response from twitter: {result}\")\n\n @requires_app_auth\n def compliance_job_get(self, job_id):\n \"\"\"\n Returns a compliance job.\n\n Calls [GET /2/compliance/jobs/{job_id}](https://developer.twitter.com/en/docs/twitter-api/compliance/batch-compliance/api-reference/get-compliance-jobs-id)\n\n Args:\n job_id (int): The ID of the compliance job.\n\n Returns:\n dict: A compliance job.\n \"\"\"\n result = self.client.get(\n \"https://api.twitter.com/2/compliance/jobs/{}\".format(job_id)\n )\n if result.status_code == 200:\n result = result.json()\n else:\n raise ValueError(f\"Error from API, response: {result.status_code}\")\n if \"data\" in result:\n return result\n else:\n raise ValueError(f\"Unknown response from twitter: {result}\")\n\n @requires_app_auth\n def compliance_job_create(self, job_type, job_name, resumable=False):\n \"\"\"\n Creates a new compliance job.\n\n Calls [POST /2/compliance/jobs](https://developer.twitter.com/en/docs/twitter-api/compliance/batch-compliance/api-reference/post-compliance-jobs)\n\n Args:\n job_type (str): The type of job to create. Either 'tweets' or 'users'.\n job_name (str): Optional name for the job.\n resumable (bool): Whether or not the job upload is resumable.\n \"\"\"\n payload = {}\n payload[\"type\"] = job_type\n payload[\"resumable\"] = resumable\n if job_name:\n payload[\"name\"] = job_name\n\n result = self.client.post(\n \"https://api.twitter.com/2/compliance/jobs\", json=payload\n )\n\n if result.status_code == 200:\n result = result.json()\n else:\n raise ValueError(f\"Error from API, response: {result.status_code}\")\n if \"data\" in result:\n return result\n else:\n raise ValueError(f\"Unknown response from twitter: {result}\")\n\n def _id_exists(self, user):\n \"\"\"\n Returns True if the user id exists\n \"\"\"\n try:\n error_name = next(self.user_lookup([user]))[\"errors\"][0][\"title\"]\n return error_name != \"Not Found Error\"\n except KeyError:\n return True\n\n def _ensure_user_id(self, user):\n \"\"\"\n Always return a valid user id, look up if not numeric.\n \"\"\"\n user = str(user)\n is_numeric = re.match(r\"^\\d+$\", user)\n\n if len(user) > 15 or (is_numeric and self._id_exists(user)):\n return user\n else:\n results = next(self.user_lookup([user], usernames=True))\n if \"data\" in results and len(results[\"data\"]) > 0:\n return results[\"data\"][0][\"id\"]\n elif is_numeric:\n return user\n else:\n raise ValueError(f\"No such user {user}\")\n\n def _ensure_user(self, user):\n \"\"\"\n Always return a valid user object.\n \"\"\"\n user = str(user)\n is_numeric = re.match(r\"^\\d+$\", user)\n\n lookup = []\n if len(user) > 15 or (is_numeric and self._id_exists(user)):\n lookup = expansions.ensure_flattened(list(self.user_lookup([user])))\n else:\n lookup = expansions.ensure_flattened(\n list(self.user_lookup([user], usernames=True))\n )\n if lookup:\n return lookup[-1]\n else:\n raise ValueError(f\"No such user {user}\")\n\n def _check_for_disconnect(self, data):\n \"\"\"\n Look for disconnect errors in a response, and reconnect if found. 
The\n function returns True if a disconnect was found and False otherwise.\n \"\"\"\n for error in data.get(\"errors\", []):\n if error.get(\"disconnect_type\") == \"OperationalDisconnect\":\n log.info(\"Received operational disconnect message, reconnecting\")\n self.connect()\n return True\n return False\n\n\ndef _ts(dt):\n \"\"\"\n Return ISO 8601 / RFC 3339 datetime in UTC. If no timezone is specified it\n is assumed to be in UTC. The Twitter API does not accept microseconds.\n\n Args:\n dt (datetime): a `datetime` object to format.\n\n Returns:\n str: an ISO 8601 / RFC 3339 datetime in UTC.\n \"\"\"\n if dt.tzinfo:\n dt = dt.astimezone(datetime.timezone.utc)\n else:\n dt = dt.replace(tzinfo=datetime.timezone.utc)\n return dt.isoformat(timespec=\"seconds\")\n\n\ndef _utcnow():\n \"\"\"\n Return _now_ in ISO 8601 / RFC 3339 datetime in UTC.\n\n Returns:\n datetime: Current timestamp in UTC.\n \"\"\"\n return datetime.datetime.now(datetime.timezone.utc).isoformat(timespec=\"seconds\")\n\n\ndef _append_metadata(result, url):\n \"\"\"\n Appends `__twarc` metadata to the result.\n Adds the full URL with parameters used, the version\n and current timestamp in seconds.\n\n Args:\n result (dict): API Response to append data to.\n url (str): URL of the API endpoint called.\n\n Returns:\n dict: API Response with append metadata\n \"\"\"\n result[\"__twarc\"] = {\"url\": url, \"version\": version, \"retrieved_at\": _utcnow()}\n return result\n", "repo_name": "CharleoY/MultiModal-Project", "sub_path": "Crawllers/TwitterCrawler/twarc/client2.py", "file_name": "client2.py", "file_ext": "py", "file_size_in_byte": 36617, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "73", "api": [{"api_name": "logging.getLogger", "line_number": 22, "usage_type": "call"}, {"api_name": "twarc.expansions.EVERYTHING.copy", "line_number": 118, "usage_type": "call"}, {"api_name": "twarc.expansions.EVERYTHING", "line_number": 118, "usage_type": "attribute"}, {"api_name": "twarc.expansions", "line_number": 118, "usage_type": "name"}, {"api_name": "time.monotonic", "line_number": 134, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 149, "usage_type": "call"}, {"api_name": "time.monotonic", "line_number": 149, "usage_type": "call"}, {"api_name": "time.monotonic", "line_number": 150, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 231, "usage_type": "call"}, {"api_name": "datetime.timezone", "line_number": 231, "usage_type": "attribute"}, {"api_name": "twarc.expansions.EVERYTHING.copy", "line_number": 352, "usage_type": "call"}, {"api_name": "twarc.expansions.EVERYTHING", "line_number": 352, "usage_type": "attribute"}, {"api_name": "twarc.expansions", "line_number": 352, "usage_type": "name"}, {"api_name": "twarc.expansions.USER_EVERYTHING.copy", "line_number": 400, "usage_type": "call"}, {"api_name": "twarc.expansions.USER_EVERYTHING", "line_number": 400, "usage_type": "attribute"}, {"api_name": "twarc.expansions", "line_number": 400, "usage_type": "name"}, {"api_name": "twarc.expansions.EVERYTHING.copy", "line_number": 447, "usage_type": "call"}, {"api_name": "twarc.expansions.EVERYTHING", "line_number": 447, "usage_type": "attribute"}, {"api_name": "twarc.expansions", "line_number": 447, "usage_type": "name"}, {"api_name": "twarc.expansions.EVERYTHING.copy", "line_number": 517, "usage_type": "call"}, {"api_name": "twarc.expansions.EVERYTHING", "line_number": 517, "usage_type": "attribute"}, {"api_name": "twarc.expansions", 
"line_number": 517, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 557, "usage_type": "call"}, {"api_name": "requests.exceptions", "line_number": 564, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 573, "usage_type": "call"}, {"api_name": "twarc.expansions.EVERYTHING.copy", "line_number": 607, "usage_type": "call"}, {"api_name": "twarc.expansions.EVERYTHING", "line_number": 607, "usage_type": "attribute"}, {"api_name": "twarc.expansions", "line_number": 607, "usage_type": "name"}, {"api_name": "twarc.expansions.USER_EVERYTHING.copy", "line_number": 729, "usage_type": "call"}, {"api_name": "twarc.expansions.USER_EVERYTHING", "line_number": 729, "usage_type": "attribute"}, {"api_name": "twarc.expansions", "line_number": 729, "usage_type": "name"}, {"api_name": "twarc.expansions.USER_EVERYTHING.copy", "line_number": 747, "usage_type": "call"}, {"api_name": "twarc.expansions.USER_EVERYTHING", "line_number": 747, "usage_type": "attribute"}, {"api_name": "twarc.expansions", "line_number": 747, "usage_type": "name"}, {"api_name": "requests.Session", "line_number": 852, "usage_type": "call"}, {"api_name": "oauthlib.oauth2.BackendApplicationClient", "line_number": 858, "usage_type": "call"}, {"api_name": "requests_oauthlib.OAuth2Session", "line_number": 859, "usage_type": "call"}, {"api_name": "requests_oauthlib.OAuth1Session", "line_number": 871, "usage_type": "call"}, {"api_name": "re.match", "line_number": 976, "usage_type": "call"}, {"api_name": "re.match", "line_number": 994, "usage_type": "call"}, {"api_name": "twarc.expansions.ensure_flattened", "line_number": 998, "usage_type": "call"}, {"api_name": "twarc.expansions", "line_number": 998, "usage_type": "name"}, {"api_name": "twarc.expansions.ensure_flattened", "line_number": 1000, "usage_type": "call"}, {"api_name": "twarc.expansions", "line_number": 1000, "usage_type": "name"}, {"api_name": "datetime.timezone", "line_number": 1033, "usage_type": "attribute"}, {"api_name": "datetime.timezone", "line_number": 1035, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 1046, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 1046, "usage_type": "attribute"}, {"api_name": "datetime.timezone", "line_number": 1046, "usage_type": "attribute"}, {"api_name": "twarc.version.version", "line_number": 1062, "usage_type": "name"}]}
+{"seq_id": "71288724075", "text": "import pytest\nfrom click.testing import CliRunner\nfrom anydo_cli.commands.cli import entry_point, version\n\n\nclass TestCli(object):\n def test_entrypoint(self):\n runner = CliRunner()\n result = runner.invoke(entry_point)\n assert result.output != ''\n\n # not in love with having to supply a subcommand. http://click.pocoo.org/5/testing/\n @pytest.mark.parametrize('args, expected_output', [\n (['--version', 'list'], version),\n ])\n def test_correct_version_is_printed(self, args, expected_output):\n runner = CliRunner()\n result = runner.invoke(entry_point, args=args)\n assert expected_output == result.output.strip()\n", "repo_name": "dustinbrown/anydo_cli", "sub_path": "tests/test_commands/test_cli.py", "file_name": "test_cli.py", "file_ext": "py", "file_size_in_byte": 674, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "73", "api": [{"api_name": "click.testing.CliRunner", "line_number": 8, "usage_type": "call"}, {"api_name": "anydo_cli.commands.cli.entry_point", "line_number": 9, "usage_type": "argument"}, {"api_name": "click.testing.CliRunner", "line_number": 17, "usage_type": "call"}, {"api_name": "anydo_cli.commands.cli.entry_point", "line_number": 18, "usage_type": "argument"}, {"api_name": "pytest.mark.parametrize", "line_number": 13, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 13, "usage_type": "attribute"}, {"api_name": "anydo_cli.commands.cli.version", "line_number": 14, "usage_type": "name"}]}
+{"seq_id": "31517147516", "text": "import collections\nimport functools\nimport importlib\nfrom savory_pie.auth import authorization, authorization_adapter\nfrom savory_pie.resources import EmptyParams\nfrom savory_pie.django.validators import validate, ValidationError\nfrom savory_pie.errors import SavoryPieError\n\n\ndef read_only_noop(func):\n @functools.wraps(func)\n def inner(self, *args, **kwargs):\n if not self._read_only:\n return func(self, *args, **kwargs)\n return inner\n\n\nclass ResourceClassUser(type):\n def __new__(cls, name, bases, d):\n\n def init_resource_class(self, rclass):\n self._arg_resource_class = rclass\n if isinstance(rclass, str) or isinstance(rclass, unicode):\n self._real_resource_class = None\n else:\n self._real_resource_class = rclass\n\n def getter(self):\n if self._real_resource_class is None and \\\n self._arg_resource_class is not None:\n rclass = self._arg_resource_class\n n = rclass.rindex('.')\n module = importlib.import_module(rclass[:n])\n self._real_resource_class = getattr(module, rclass[n+1:])\n return self._real_resource_class\n\n def setter(self, value):\n self._real_resource_class = value\n\n deler = None\n d['init_resource_class'] = init_resource_class\n d['_resource_class'] = property(getter, setter, deler, '')\n return type.__new__(cls, name, bases, d)\n\n\nclass Field(object):\n @property\n def name(self):\n name = getattr(self, '_attribute', None) or getattr(self, '_full_attribute', None)\n if not name:\n raise SavoryPieError(u'Unable to determine name for field: {0}'.format(self))\n return name\n\n def schema(self, ctx, **kwargs):\n schema = kwargs.pop('schema', {})\n if getattr(self, '_type', None):\n return dict({'type': self._type.__name__}.items() + schema.items())\n return schema\n\n\nclass AttributeField(Field):\n \"\"\"\n Simple Field that translates an object attribute to/from a dict.\n\n Parameters:\n\n ``attribute``\n attribute on the Model can be a multi-level expression - like\n related_entity.attribute\n\n ``type``\n expecting type of value -- int, bool, etc.\n\n ``published_property``\n optional -- name exposed in the API\n\n ``read_only``\n optional -- this api will never try and set this value\n\n ``optional``\n optional -- if missing, will not throw a ValidationError\n\n .. code-block:: python\n\n AttributeField('name', type=str)\n\n .. code-block:: javascript\n\n {'name': obj.name}\n\n .. code-block:: python\n\n AttributeField('other.age', type=int)\n\n .. 
code-block:: javascript\n\n {'age': obj.other.age}\n \"\"\"\n def __init__(self,\n attribute,\n type,\n published_property=None,\n use_prefetch=False,\n read_only=False,\n validator=None,\n optional=False,\n permission=None):\n self._full_attribute = attribute\n self._type = type\n self._published_property = published_property\n self._read_only = read_only\n self._optional = optional\n self.validator = validator or []\n self.permission = permission\n\n def _compute_property(self, ctx):\n if self._published_property is not None:\n return ctx.formatter.convert_to_public_property(self._published_property)\n else:\n return ctx.formatter.convert_to_public_property(self._bare_attribute)\n\n @property\n def _bare_attribute(self):\n return self._full_attribute.split('.')[-1]\n\n @property\n def _attrs(self):\n return self._full_attribute.split('.')\n\n def _get_object(self, root_obj):\n obj = root_obj\n for attr in self._attrs[:-1]:\n obj = getattr(obj, attr)\n if obj is None:\n return None\n return obj\n\n def _get(self, obj):\n obj = self._get_object(obj)\n if obj is None:\n return None\n else:\n return getattr(obj, self._bare_attribute)\n\n def _set(self, obj, value):\n obj = self._get_object(obj)\n # TODO: handle None\n return setattr(obj, self._bare_attribute, value)\n\n @read_only_noop\n @authorization(authorization_adapter)\n def handle_incoming(self, ctx, source_dict, target_obj):\n attr = self._compute_property(ctx)\n if attr not in source_dict:\n if self._optional:\n return\n raise ValidationError(self, {'missingField': attr,\n 'target': type(target_obj).__name__})\n with ctx.target(target_obj):\n self._set(\n target_obj,\n self.to_python_value(ctx, source_dict[self._compute_property(ctx)])\n )\n\n def handle_outgoing(self, ctx, source_obj, target_dict):\n target_dict[self._compute_property(ctx)] = self.to_api_value(\n ctx,\n self._get(source_obj)\n )\n\n def to_python_value(self, ctx, api_value):\n return ctx.formatter.to_python_value(self._type, api_value)\n\n def to_api_value(self, ctx, python_value):\n return ctx.formatter.to_api_value(self._type, python_value)\n\n def validate_resource(self, ctx, key, resource, value):\n error_dict = {}\n if isinstance(self.validator, collections.Iterable):\n for validator in self.validator:\n validator.find_errors(error_dict, ctx, key, resource, self, value)\n else:\n self.validator.find_errors(error_dict, ctx, key, resource, self, value)\n return error_dict\n\n\nclass URIResourceField(Field):\n \"\"\"\n Field that exposes just the URI of related entity\n\n\n Parameters:\n\n ``attribute``\n name of the relationship between the parent object and the related\n object may only be single level\n\n ``resource_class``\n a ModelResource -- used to represent the related object needs to be\n fully addressable\n\n ``published_property``\n optional -- name exposed in the API\n\n ``read_only``\n optional -- this api will never try and set this value\n\n .. code-block:: python\n\n URIResourceField('other', OtherResource)\n\n .. 
code-block:: javascript\n\n {'other': '/api/other/{pk}'}\n \"\"\"\n __metaclass__ = ResourceClassUser\n\n def __init__(self,\n attribute,\n resource_class,\n published_property=None,\n read_only=False,\n validator=None,\n permission=None):\n self._attribute = attribute\n self.init_resource_class(resource_class)\n self._published_property = published_property\n self._read_only = read_only\n self.validator = validator or []\n self.permission = permission\n\n def _compute_property(self, ctx):\n if self._published_property is not None:\n return ctx.formatter.convert_to_public_property(self._published_property)\n else:\n return ctx.formatter.convert_to_public_property(self._attribute)\n\n @read_only_noop\n @authorization(authorization_adapter)\n def handle_incoming(self, ctx, source_dict, target_obj):\n uri = source_dict[self._compute_property(ctx)]\n if uri is not None:\n resource = ctx.resolve_resource_uri(uri)\n if resource is None:\n raise ValueError('invalid URI {0}: '.format(uri))\n\n setattr(target_obj, self._attribute, resource.model)\n else:\n setattr(target_obj, self._attribute, None)\n\n def handle_outgoing(self, ctx, source_obj, target_dict):\n sub_model = getattr(source_obj, self._attribute)\n if sub_model is not None:\n resource = self._resource_class(sub_model)\n target_dict[self._compute_property(ctx)] = ctx.build_resource_uri(resource)\n else:\n target_dict[self._compute_property(ctx)] = None\n\n def validate_resource(self, ctx, key, resource, source_dict):\n error_dict = {}\n # TODO how do we validate this guy?\n return error_dict\n\n\nclass CompleteURIResourceField(Field):\n \"\"\"\n Field that exposes just the URI of the complete entity of itself.\n This is useful if a resource is explicitly not including resource_uris, due to recursive inclusion,\n this field can be used, to link to the URI of the full resource version of itself.\n It adds a hard coded 'completeResourceUri' entry to the target dictionary.\n\n Parameters:\n\n ``resource_class``\n a ModelResource -- used to represent the related object needs to be\n fully addressable\n\n .. code-block:: python\n\n CompleteURIResourceField(OtherResource)\n\n .. code-block:: javascript\n\n {'completeResourceUri': '/api/other/{pk}'}\n \"\"\"\n __metaclass__ = ResourceClassUser\n\n def __init__(self, resource_class, read_only=False, permission=None):\n self.init_resource_class(resource_class)\n self._read_only = read_only\n self.permission = permission\n\n @authorization(authorization_adapter)\n def handle_incoming(self, ctx, source_dict, target_obj):\n pass\n\n def handle_outgoing(self, ctx, source_obj, target_dict):\n resource = self._resource_class(source_obj)\n property_name = ctx.formatter.convert_to_public_property('complete_resource_uri')\n target_dict[property_name] = ctx.build_resource_uri(resource)\n\n\nclass URIListResourceField(Field):\n \"\"\"\n Field that exposes a list of URIs of related entity, this allows for a many to many relationship.\n\n\n Parameters:\n\n ``attribute``\n name of the relationship between the parent object and the related\n object may only be single level\n\n ``resource_class``\n a ModelResource -- used to represent the related object needs to be\n fully addressable\n\n ``published_property``\n optional -- name exposed in the API\n\n ``read_only``\n optional -- this api will never try and set this value\n\n .. code-block:: python\n\n URIListResourceField('others', OtherResource)\n\n .. 
code-block:: javascript\n\n {'others': ['/api/other/{pk_1}', '/api/other/{pk_2}']\n \"\"\"\n __metaclass__ = ResourceClassUser\n\n def __init__(self,\n attribute,\n resource_class,\n published_property=None,\n read_only=False,\n validator=None,\n permission=None):\n self._attribute = attribute\n self.init_resource_class(resource_class)\n self._published_property = published_property\n self._read_only = read_only\n self.validator = validator or []\n self.permission = permission\n\n def _compute_property(self, ctx):\n if self._published_property is not None:\n return ctx.formatter.convert_to_public_property(self._published_property)\n else:\n return ctx.formatter.convert_to_public_property(self._attribute)\n\n def get_iterable(self, value):\n return value\n\n @read_only_noop\n @authorization(authorization_adapter)\n def handle_incoming(self, ctx, source_dict, target_obj):\n attribute = getattr(target_obj, self._attribute)\n\n db_keys = set()\n db_models = {}\n for model in self.get_iterable(attribute):\n resource = self._resource_class(model)\n db_models[resource.key] = model\n db_keys.add(resource.key)\n\n new_models = []\n request_keys = set()\n\n for resource_uri in source_dict[self._compute_property(ctx)]:\n resource = ctx.resolve_resource_uri(resource_uri)\n if resource:\n request_keys.add(resource.key)\n\n if not resource.key in db_keys:\n new_models.append(resource.model)\n else:\n raise SavoryPieError(u'Unable to resolve resource uri {0}'.format(resource_uri))\n\n # Delete before add to prevent problems with unique constraints\n models_to_remove = [db_models[key] for key in db_keys - request_keys]\n # If the FK is not nullable the attribute will not have a remove\n if hasattr(attribute, 'remove'):\n attribute.remove(*models_to_remove)\n else:\n for model in models_to_remove:\n model.delete()\n\n if hasattr(attribute, 'add'):\n attribute.add(*new_models)\n else:\n for obj in new_models:\n through_parameters = {\n attribute.source_field_name: target_obj,\n attribute.target_field_name: obj\n }\n attribute.through.objects.create(**through_parameters)\n\n def handle_outgoing(self, ctx, source_obj, target_dict):\n attrs = self._attribute.split('.')\n attribute = source_obj\n\n for attr in attrs:\n attribute = getattr(attribute, attr)\n if attribute is None:\n return None\n\n resource_uris = []\n for model in self.get_iterable(attribute):\n model_resource = self._resource_class(model)\n resource_uris.append(ctx.build_resource_uri(model_resource))\n target_dict[self._compute_property(ctx)] = resource_uris\n\n\nclass SubObjectResourceField(Field):\n \"\"\"\n Field that embeds a single related resource into the parent object\n\n Parameters:\n\n ``attribute``\n name of the relationship between the parent object and the related\n object may only be single level\n\n ``resource_class``\n a ModelResource -- used to represent the related object\n\n ``published_property``\n optional -- name exposed in the API\n\n ``read_only``\n optional -- this api will never try and set this value\n\n ``validator``\n optional -- a ResourceValidator, or list/tuple of ResourceValidators, to\n validate the data in the related object\n\n .. code-block:: python\n\n SubObjectResourceField('other', OtherResource)\n\n .. 
code-block:: javascript\n\n        {'other': {'age': 9}}\n    \"\"\"\n    __metaclass__ = ResourceClassUser\n\n    def __init__(self,\n                 attribute,\n                 resource_class,\n                 published_property=None,\n                 read_only=False,\n                 validator=None,\n                 permission=None):\n        self._attribute = attribute\n        self.init_resource_class(resource_class)\n        self._published_property = published_property\n        self._read_only = read_only\n        self.validator = validator or []\n        self.permission = permission\n\n    def _compute_property(self, ctx):\n        if self._published_property is not None:\n            return ctx.formatter.convert_to_public_property(self._published_property)\n        else:\n            return ctx.formatter.convert_to_public_property(self._attribute)\n\n    def get_subresource(self, ctx, source_dict, target_obj):\n        \"\"\"\n        Extension point called by :meth:~`savory_pie.fields.handle_incoming` to\n        build a resource class around the target attribute or return None if it\n        is not found. Can try looking by resource_uri etc.\n        \"\"\"\n        sub_source_dict = source_dict[self._compute_property(ctx)]\n        resource = None\n        # TODO: clean up later per bug JRUT-4708\n        if sub_source_dict is not None and 'resourceUri' in sub_source_dict:\n            resource = ctx.resolve_resource_uri(sub_source_dict['resourceUri'])\n        else:\n            try:\n                attribute = getattr(target_obj, self._attribute)\n            except AttributeError:\n                return None\n\n            resource = self._resource_class(attribute)\n\n        return resource\n\n    def get_submodel(self, ctx, source_object):\n        return getattr(source_object, self._attribute, None)\n\n    def pre_save(self, model):\n        return True\n\n    @read_only_noop\n    @authorization(authorization_adapter)\n    def handle_incoming(self, ctx, source_dict, target_obj):\n        if not source_dict:\n            setattr(target_obj, self._attribute, None)\n        else:\n            sub_resource = self.get_subresource(ctx, source_dict, target_obj)\n\n            if not sub_resource: # creating a new resource\n                sub_resource = self._resource_class.create_resource()\n\n            sub_source_dict = source_dict[self._compute_property(ctx)]\n\n            # This works around a Django ORM limitation: a one-to-one field\n            # cannot simply be assigned None, so only null the attribute when\n            # the incoming value is None and the current value is not already None.\n            if sub_source_dict is None:\n                if hasattr(target_obj, self._attribute) \\\n                        and getattr(target_obj, self._attribute) is not None \\\n                        and getattr(target_obj, self._attribute).pk:\n                    setattr(target_obj, self._attribute, None)\n            else:\n                # Use pre_save to decide whether the attribute must be set before\n                # or after put(): for a ReverseSingleRelatedObject (pre_save is\n                # False) it must be set first, to work around Django ORM restrictions.\n                if not self.pre_save(target_obj):\n                    setattr(target_obj, self._attribute, sub_resource.model)\n\n                with ctx.target(target_obj):\n                    sub_resource.put(\n                        ctx,\n                        sub_source_dict,\n                        skip_validation=getattr(self, '_skip_validation', False)\n                    )\n\n                if self.pre_save(target_obj):\n                    setattr(target_obj, self._attribute, sub_resource.model)\n\n    def handle_outgoing(self, ctx, source_obj, target_dict):\n        sub_model = self.get_submodel(ctx, source_obj)\n        if sub_model is None:\n            target_dict[self._compute_property(ctx)] = None\n        else:\n            target_dict[self._compute_property(ctx)] =\\\n                self._resource_class(sub_model).get(ctx, EmptyParams())\n\n    def validate_resource(self, ctx, key, resource, source_dict):\n        return validate(ctx, key + '.' 
+ self.name, self._resource_class, source_dict)\n\n\nclass IterableField(Field):\n    \"\"\"\n    Field that embeds a many relationship into the parent object\n\n    Parameters:\n\n    ``attribute``\n        name of the relationship between the parent object and the related\n        objects can be a multi-level expression - like related_entity.many_to_many_field\n\n    ``resource_class``\n        a ModelResource -- used to represent the related objects\n\n    ``published_property``\n        optional -- name exposed in the API\n\n    ``read_only``\n        optional -- this api will never try and set this value\n\n    ``iterable_factory``\n        optional -- a callable that is passed the attribute and returns the\n        iterable this field exports\n\n    .. code-block:: python\n\n        IterableField('others', OtherResource)\n\n    .. code-block:: javascript\n\n        {'others': [{'age': 6}, {'age': 1}]}\n    \"\"\"\n    __metaclass__ = ResourceClassUser\n\n    def __init__(self,\n                 attribute,\n                 resource_class,\n                 published_property=None,\n                 read_only=False,\n                 iterable_factory=None,\n                 validator=None,\n                 permission=None):\n        self._attribute = attribute\n        self.init_resource_class(resource_class)\n        self._published_property = published_property\n        self._read_only = read_only\n        self._iterable_factory = iterable_factory\n        self.validator = validator or []\n        self.permission = permission\n\n    def _compute_property(self, ctx):\n        if self._published_property is not None:\n            return ctx.formatter.convert_to_public_property(self._published_property)\n        else:\n            return ctx.formatter.convert_to_public_property(self._attribute)\n\n    def _get_resource(self, ctx, attribute, model_dict):\n        resource = None\n        if 'resourceUri' in model_dict:\n            resource = ctx.resolve_resource_uri(model_dict['resourceUri'])\n        elif '_id' in model_dict: # TODO what if you give an id that is not in the db?\n            # TODO get key without the extra db lookup\n            model = self._resource_class.get_from_queryset(\n                attribute.all(),\n                model_dict['_id']\n            )\n            resource = self._resource_class(model)\n        return resource\n\n    def get_iterable(self, value):\n        return value\n\n    @property\n    def _bare_attribute(self):\n        return self._attribute.split('.')[-1]\n\n    @read_only_noop\n    @authorization(authorization_adapter)\n    def handle_incoming(self, ctx, source_dict, target_obj):\n        attribute = getattr(target_obj, self._attribute)\n\n        # We are doing this outside of get_iterable so that subclasses cannot\n        # remove this override.\n        if self._iterable_factory:\n            iterable = self._iterable_factory(attribute)\n        else:\n            iterable = self.get_iterable(attribute)\n\n        db_keys = set()\n        db_models = {}\n        for model in iterable:\n            resource = self._resource_class(model)\n            db_models[resource.key] = model\n            db_keys.add(resource.key)\n\n        new_models = []\n        new_put_data = []\n        request_keys = set()\n        request_models = {}\n        for model_dict in source_dict.get(self._compute_property(ctx), []):\n            resource = self._get_resource(ctx, attribute, model_dict)\n            if resource:\n                request_models[resource.key] = resource.model\n                request_keys.add(resource.key)\n                # Check to see if the resource has already been saved in the DB\n                if resource.key in db_keys:\n                    with ctx.target(resource.model):\n                        resource.put(ctx, model_dict)\n                    # If the model is already in the db but the manager has no\n                    # `add` attribute (an explicit through relationship), the\n                    # association row itself still has to be created below, so\n                    # track the model as new.\n                    if not hasattr(attribute, 'add'):\n                        new_models.append(resource.model)\n                else:\n                    # if the model is not in the database, must save it\n                    
new_models.append(resource.model)\n\n            else:\n                # if the resource does not exist then this is a new instance\n                new_put_data.append(model_dict)\n\n        # Delete before add to prevent problems with unique constraints\n        models_to_remove = [db_models[key] for key in db_keys - request_keys]\n        # If the FK is not nullable the attribute will not have a remove\n        if hasattr(attribute, 'remove'):\n            attribute.remove(*models_to_remove)\n        else:\n            for obj in models_to_remove:\n                # ManyRelatedManager\n                if hasattr(attribute, 'through'):\n                    through_params = {\n                        attribute.source_field_name: target_obj,\n                        attribute.target_field_name: obj\n                    }\n                    # only delete intermediary model instance if it already exists\n                    for instance in attribute.through.objects.filter(**through_params):\n                        instance.delete()\n                # RelatedManager\n                else:\n                    obj.delete()\n\n        # Delay all the new creates until after the deletes, again because of\n        # unique constraints\n        for model_dict in new_put_data:\n            model_resource = self._resource_class.create_resource()\n            with ctx.target(target_obj):\n                model_resource.put(ctx, model_dict, save=True)\n            new_models.append(model_resource.model)\n\n        if hasattr(attribute, 'add'):\n            attribute.add(*new_models)\n        else:\n            for obj in new_models:\n                through_params = {\n                    attribute.source_field_name: target_obj,\n                    attribute.target_field_name: obj\n                }\n                # only create intermediary model instance if it doesn't already exist\n                if not attribute.through.objects.filter(**through_params).exists():\n                    attribute.through.objects.create(**through_params)\n\n    def handle_outgoing(self, ctx, source_obj, target_dict):\n        attrs = self._attribute.split('.')\n        attribute = source_obj\n\n        for attr in attrs:\n            attribute = getattr(attribute, attr, None)\n            if attribute is None:\n                return None\n\n        objects = []\n\n        # We are doing this outside of get_iterable so that subclasses cannot\n        # remove this override.\n        if self._iterable_factory:\n            iterable = self._iterable_factory(attribute)\n        else:\n            iterable = self.get_iterable(attribute)\n\n        for model in iterable:\n            model_resource = self._resource_class(model)\n            model_dict = model_resource.get(ctx, EmptyParams())\n            # only add '_id' if there is no 'resourceUri'\n            if 'resourceUri' not in model_dict:\n                model_dict['_id'] = model_resource.key\n            objects.append(model_dict)\n        target_dict[self._compute_property(ctx)] = objects\n\n    def validate_resource(self, ctx, key, resource, source_dict_list):\n        error_dict = {}\n        # self.validator may be a single validator or a list of them (it\n        # defaults to []), so mirror the handling in AttributeField\n        if isinstance(self.validator, collections.Iterable):\n            for validator in self.validator:\n                validator.find_errors(error_dict, ctx, key, resource, self, source_dict_list)\n        elif self.validator:\n            self.validator.find_errors(error_dict, ctx, key, resource, self, source_dict_list)\n        return error_dict\n\n    def schema(self, ctx, **kwargs):\n        return super(IterableField, self).schema(ctx, **kwargs)\n", "repo_name": "armooo/savory-pie", "sub_path": "savory_pie/fields.py", "file_name": "fields.py", "file_ext": "py", "file_size_in_byte": 26450, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "73", "api": [{"api_name": "functools.wraps", "line_number": 11, "usage_type": "call"}, {"api_name": "importlib.import_module", "line_number": 33, "usage_type": "call"}, {"api_name": "savory_pie.errors.SavoryPieError", "line_number": 51, "usage_type": "call"}, {"api_name": "savory_pie.django.validators.ValidationError", "line_number": 157, "usage_type": "call"}, {"api_name": "savory_pie.auth.authorization", "line_number": 151, "usage_type": "call"}, {"api_name": "savory_pie.auth.authorization_adapter", "line_number": 151, "usage_type": "argument"}, {"api_name": "collections.Iterable", "line_number": 179, "usage_type": "attribute"}, {"api_name": 
"savory_pie.auth.authorization", "line_number": 239, "usage_type": "call"}, {"api_name": "savory_pie.auth.authorization_adapter", "line_number": 239, "usage_type": "argument"}, {"api_name": "savory_pie.auth.authorization", "line_number": 293, "usage_type": "call"}, {"api_name": "savory_pie.auth.authorization_adapter", "line_number": 293, "usage_type": "argument"}, {"api_name": "savory_pie.errors.SavoryPieError", "line_number": 380, "usage_type": "call"}, {"api_name": "savory_pie.auth.authorization", "line_number": 358, "usage_type": "call"}, {"api_name": "savory_pie.auth.authorization_adapter", "line_number": 358, "usage_type": "argument"}, {"api_name": "savory_pie.auth.authorization", "line_number": 498, "usage_type": "call"}, {"api_name": "savory_pie.auth.authorization_adapter", "line_number": 498, "usage_type": "argument"}, {"api_name": "savory_pie.resources.EmptyParams", "line_number": 541, "usage_type": "call"}, {"api_name": "savory_pie.django.validators.validate", "line_number": 544, "usage_type": "call"}, {"api_name": "savory_pie.auth.authorization", "line_number": 623, "usage_type": "call"}, {"api_name": "savory_pie.auth.authorization_adapter", "line_number": 623, "usage_type": "argument"}, {"api_name": "savory_pie.resources.EmptyParams", "line_number": 728, "usage_type": "call"}]}
+{"seq_id": "9765323242", "text": "\n#Mineria de datos\n#POS TAGGER - Formato TF-IDF\n\n#Importaciones:\nimport nltk\n# remove stop words (meaningless words for the language-meaning)\nfrom nltk.corpus import stopwords\nimport os\nfrom sklearn.preprocessing import MinMaxScaler\nimport numpy as np\nfrom pandas import DataFrame\n\n\n#Funcion que genera el POS tag de una oracion\ndef tag(s):\n\tt = nltk.word_tokenize(s)\n\treturn nltk.pos_tag(t)\n\n#Funcion que abre un archivo y devuelve su contenido\ndef gettext(f):\n\tfl = open(f,'r')\n\ttxt = \"\"\n\tfor line in fl.readlines():\n\t\t# use regular expressions to replace email addresses, URLs, phone numbers, other numbers\n\n\t\t# email addresses with 'email'\n\t\tprocessed = line.replace(r'^.+@[^\\.].*\\.[a-z]{2,}$', 'emailaddress')\n\t\t# webadress\n\t\tprocessed = processed.replace(r'^\\(?[\\d]{3}\\)?[\\s-]?[\\d]{3}[\\s-]?[\\d]{4}$', 'phonenumbr')\n\t\t# phonenumbers\n\t\tprocessed = processed.replace(r'^\\(?[\\d]{3}\\)?[\\s-]?[\\d]{3}[\\s-]?[\\d]{4}$', 'phonenumbr')\n\t\t# numbers \n\t\tprocessed = processed.replace(r'\\d+(\\.\\d+)?', 'numbr')\n\t\t# Remove punctuation\n\t\tprocessed = processed.replace(r'[^\\w\\d\\s]', ' ')\n\t\t# Replace whitespace between terms with a single space\n\t\tprocessed = processed.replace(r'\\s+', ' ')\n\t\t# Remove leading and trailing whitespace\n\t\tprocessed = processed.replace(r'^\\s+|\\s+?$', '')\n\t\t# change words to lower case - Hello, HELLO, hello are all the same word\n\t\tprocessed = processed.lower()\n\t\t# remove stopwords\n\t\tstop_words = set(stopwords.words('english'))\n\t\t#processed = processed.apply(lambda x: ' '.join(term for term in x.split() if term not in stop_words))\n\t\ttxt = txt + processed + \"\\n\"\n\n\treturn txt\n\n\n#Funcion que escribe un archivo con el resultado del pos tag\ndef wpos(f,s):\n\tfl = open(f,'w')\n\tfl.write(s)\n\n\n\ndef getCode(thelist):\n\t#returnlist\n\treturnlist = []\n\t# first define a dictionary to get the code of every POS\n\tcode_dict = {'CC':1., 'CD': 2., 'DT': 3., 'EX': 4., 'FW': 5.,\n\t\t\t\t 'IN': 6., 'JJ': 7., 'JJR': 8., 'JJS': 9., 'LS': 10.,\n\t\t\t\t 'MD': 11., 'NN': 12., 'NNS': 13., 'NNP': 14., 'NNPS': 15.,\n\t\t\t\t 'PDT': 16., 'POS': 17., 'PRP': 18., 'PRP$': 19.,\n\t\t\t\t 'RB': 20., 'RBR': 21., 'RBS': 22., 'RP': 23., 'SYM': 24.,\n\t\t\t\t 'TO': 25., 'UH': 26., 'VB': 27., 'VBD': 28., 'VBG': 29.,\n\t\t\t\t 'VBN': 30., 'VBP':31., 'VBZ': 32., 'WDT': 33., 'WP': 34., \n\t\t\t\t 'WP$': 35., 'WRB': 36.}\n\tfor pair in thelist:\n\t\ttry:\n\t\t\telement = pair[1]\n\t\t\treturnlist.append(code_dict[element])\t\t\n\t\texcept:\n\t\t\telement = 0\n\t\t\n\treturn returnlist\n\ndef padding(listoflist, list1, list2, list3):\n\treturnlist = []\n\tmaxlongitude = 0\n\n\t# this will determine the list's maximum size\n\tfor i in list1:\n\t\tnewlongitude = len(i)\n\t\tif newlongitude > maxlongitude:\n\t\t\tmaxlongitude = newlongitude\n\n\tfor i in list2:\n\t\tnewlongitude = len(i)\n\t\tif newlongitude > maxlongitude:\n\t\t\tmaxlongitude = newlongitude\n\n\tfor i in list3:\n\t\tnewlongitude = len(i)\n\t\tif newlongitude > maxlongitude:\n\t\t\tmaxlongitude = newlongitude\n\n\tfor j in listoflist:\n\t\tsize = len(j)\n\t\twhile(size < maxlongitude):\n\t\t\tj.append(0.0)\n\t\t\tsize = size + 1\n\t\treturnlist.append(j)\n\n\treturn returnlist\n\ndef addLabelAtTheEnd(listOfList, label):\n\n\tfor sublist in listOfList:\n\t\tsublist.append(label)\n\n\treturn listOfList\n\n\ndef transformToDataFrame(listOfList):\n\n\tdf = DataFrame(listOfList)\n\treturn df\n\ndef 
concatenateLists(list0, list1, list2):\n\tnewlist = []\n\tfor i in list0:\n\t\tnewlist.append(i)\n\tfor i in list1:\n\t\tnewlist.append(i)\n\tfor i in list2:\n\t\tnewlist.append(i)\n\n\treturn newlist\n\n\ndef normalize(listOfList):\n\n\treturnlist = []\n\n\tfor sublist in listOfList:\n\t\tnewlist = []\n\t\tfor i in sublist:\n\t\t\telem = i / 36.0\n\t\t\tnewlist.append(elem)\n\n\t\treturnlist.append(newlist)\n\n\treturn returnlist\n\n\ndef main():\n\t# Write the first line of the csv\n\tst = \"Doc_ID,TAGS,CLASS\\n\"\n\n\t# Build the list of files in folder 1 (class 1 corpus)\n\tdirname = \"corp/1\"\n\tfls = os.listdir(dirname)\n\ti = 0\n\tlistoflist = []\n\tlist0 = []\n\tfor f in fls:\n\t\t# Save the text id\n\t\t#i = i + 1\n\t\t# Get the tags for f\n\t\ttags = tag(gettext(\"corp/1/\" + f))\n\t\t# Build a dictionary with the obtained tags\n\t\td = dict(tags)\n\t\t# Build an auxiliary list with the tokens the text contained\n\t\tlists = list(gettext(\"corp/1/\" + f).split(\" \"))\n\t\tlistoflist.append(lists)\n\t\t# process the split texts\n\tfor li in listoflist:\n\t\tll = []\n\t\tfor j in li:\n\t\t\tt = nltk.word_tokenize(j)\n\t\t\tpair = nltk.pos_tag(t)\n\t\t\tll.append(next(iter(pair), None))\n\n\t\t#print(ll)\n\t\tlist0.append(ll)\n\t# corpus 2\n\tdirname = \"corp/2\"\n\tfls = os.listdir(dirname)\n\ti = 0\n\tlistoflist = []\n\tlist1 = []\n\tfor f in fls:\n\t\t# Save the text id\n\t\t#i = i + 1\n\t\t# Get the tags for f\n\t\ttags = tag(gettext(\"corp/2/\" + f))\n\t\t# Build a dictionary with the obtained tags\n\t\td = dict(tags)\n\t\t# Build an auxiliary list with the tokens the text contained\n\t\tlists = list(gettext(\"corp/2/\" + f).split(\" \"))\n\t\tlistoflist.append(lists)\n\t# process the split texts\n\tfor li in listoflist:\n\t\tll = []\n\t\tfor j in li:\n\t\t\tt = nltk.word_tokenize(j)\n\t\t\tpair = nltk.pos_tag(t)\n\t\t\tll.append(next(iter(pair), None))\n\n\t\t#print(ll)\n\t\tlist1.append(ll)\n\n\n\t# corpus 3\n\tdirname = \"corp/3\"\n\tfls = os.listdir(dirname)\n\ti = 0\n\tlistoflist = []\n\tlist2 = []\n\tfor f in fls:\n\t\t# Save the text id\n\t\t#i = i + 1\n\t\t# Get the tags for f\n\t\ttags = tag(gettext(\"corp/3/\" + f))\n\t\t# Build a dictionary with the obtained tags\n\t\td = dict(tags)\n\t\t# Build an auxiliary list with the tokens the text contained\n\t\tlists = list(gettext(\"corp/3/\" + f).split(\" \"))\n\t\tlistoflist.append(lists)\n\t# process the split texts\n\tfor li in listoflist:\n\t\tll = []\n\t\tfor j in li:\n\t\t\tt = nltk.word_tokenize(j)\n\t\t\tpair = nltk.pos_tag(t)\n\t\t\tll.append(next(iter(pair), None))\n\n\t\t#print(ll)\n\t\tlist2.append(ll)\n\n\n\tcodifiedList0 = []\n\tcodifiedList1 = []\n\tcodifiedList2 = []\n\tfor li in list0:\n\t\tnewlist = getCode(li)\n\t\tcodifiedList0.append(newlist)\n\tfor li in list1:\n\t\tnewlist = getCode(li)\n\t\tcodifiedList1.append(newlist)\n\tfor li in list2:\n\t\tnewlist = getCode(li)\n\t\tcodifiedList2.append(newlist)\n\n\n\n\t# padding\n\tcodifiedList0 = padding(codifiedList0, codifiedList0, codifiedList1, codifiedList2)\n\tcodifiedList1 = padding(codifiedList1, codifiedList0, codifiedList1, codifiedList2)\n\tcodifiedList2 = padding(codifiedList2, codifiedList0, codifiedList1, codifiedList2)\n\t# normalize tag codes to [0, 1] by dividing by 36, the number of tags\n\tcodifiedList0 = normalize(codifiedList0)\n\tcodifiedList1 = normalize(codifiedList1)\n\tcodifiedList2 = normalize(codifiedList2)\n\n\n\t# Add the class label at the end.\n\tcodifiedList0 = 
addLabelAtTheEnd(codifiedList0, 1.0)\n\tcodifiedList1 = addLabelAtTheEnd(codifiedList1, 2.0)\n\tcodifiedList2 = addLabelAtTheEnd(codifiedList2, 3.0)\n\n\t# Concatenate all the lists into one big list\n\tconcatenated = concatenateLists(codifiedList0, codifiedList1, codifiedList2)\n\n\t# transform to a dataframe and shuffle the rows:\n\tdf = transformToDataFrame(concatenated)\n\n\tdf_shuffled = df.sample(frac = 1)\n\n\tdf_shuffled.to_csv(\"outcome.csv\")\n\n\tprint(df_shuffled)\n\n\nif __name__ == \"__main__\":\n\tmain()\n", "repo_name": "flovera1/complejidad", "sub_path": "Proyecto_complejidadTexto/tf.py", "file_name": "tf.py", "file_ext": "py", "file_size_in_byte": 6885, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "73", "api": [{"api_name": "nltk.word_tokenize", "line_number": 17, "usage_type": "call"}, {"api_name": "nltk.pos_tag", "line_number": 18, "usage_type": "call"}, {"api_name": "nltk.corpus.stopwords.words", "line_number": 44, "usage_type": "call"}, {"api_name": "nltk.corpus.stopwords", "line_number": 44, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 118, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 156, "usage_type": "call"}, {"api_name": "nltk.word_tokenize", "line_number": 174, "usage_type": "call"}, {"api_name": "nltk.pos_tag", "line_number": 175, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 182, "usage_type": "call"}, {"api_name": "nltk.word_tokenize", "line_number": 200, "usage_type": "call"}, {"api_name": "nltk.pos_tag", "line_number": 201, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 210, "usage_type": "call"}, {"api_name": "nltk.word_tokenize", "line_number": 228, "usage_type": "call"}, {"api_name": "nltk.pos_tag", "line_number": 229, "usage_type": "call"}]}
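The pad-and-normalize step in the tf.py record above brings every tag-code row to the same width and scales codes into [0, 1]. Restated compactly on toy data (pure Python; the values are illustrative):

rows = [[12.0, 7.0], [27.0]]                          # e.g. codes for NN, JJ / VB
width = max(len(r) for r in rows)
padded = [r + [0.0] * (width - len(r)) for r in rows]
normalized = [[v / 36.0 for v in r] for r in padded]  # 36 Penn Treebank tags
print(normalized)  # [[0.333..., 0.194...], [0.75, 0.0]]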
+{"seq_id": "73434360557", "text": "import asyncio\n\nfrom storage_facade import StorageFacade\n\n\nclass QueueHandler(object):\n\n parser = None\n storage_facade = None\n threads = 10\n\n def __init__(self, loop, parser, storage_facade: StorageFacade):\n self.loop = loop\n self.parser = parser\n self.parser.set_loop(loop)\n self.storage_facade = storage_facade\n print(\"Queue Handler inited\")\n\n def run(self):\n workers = []\n for _ in range(self.threads):\n workers.append(asyncio.ensure_future(self.handle()))\n self.loop.run_until_complete(asyncio.wait(workers))\n\n async def handle(self):\n while not self.storage_facade.queue_empty():\n queue_item = self.storage_facade.get_url_from_queue()\n try:\n await self.handle_queue_message(queue_item)\n except:\n print(\"Error during processing: \", queue_item)\n\n async def handle_queue_message(self, message):\n print(\"Message received: \", message)\n await self.parser.parse(message)\n\n def close(self):\n pass\n", "repo_name": "dima-kov/parser", "sub_path": "qhandler.py", "file_name": "qhandler.py", "file_ext": "py", "file_size_in_byte": 1076, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "73", "api": [{"api_name": "storage_facade.StorageFacade", "line_number": 12, "usage_type": "name"}, {"api_name": "asyncio.ensure_future", "line_number": 22, "usage_type": "call"}, {"api_name": "asyncio.wait", "line_number": 23, "usage_type": "call"}]}
+{"seq_id": "20826129876", "text": "import cherrypy\nimport os.path\nimport json\nimport time\nfrom cherrypy.lib.static import serve_file\nfrom crawler_dao import CrawlerDAO\nfrom crawler import Crawler\n\ncurrent_dir = os.path.dirname(os.path.abspath(__file__))\npublic_dir = os.path.join(current_dir, 'public/dist')\n\nclass Root():\n\n\tdef __init__(self):\n\t\tself.dao = CrawlerDAO()\n\t\t\n\t@cherrypy.expose\n\tdef index(self):\n\t\t#return serve_file(os.path.join(current_dir, 'index.html'), content_type='text/html')\n\t\treturn serve_file(os.path.join(public_dir , 'index.html'), content_type='text/html')\n\n\t@cherrypy.expose\n\tdef start(self, site):\n\n\t\tcherrypy.request.headers[\"Content-Type\"] = 'utf-8'\n\n\t\tdao = CrawlerDAO()\n\t\tprint(cherrypy.url())\n\n\t\tcrawler = Crawler(dao, 'http://' + site)\n\t\tcrawler.run()\n \n\t\t#FINISHED CONNECTION\n\t\tdao.connection.close()\n\n\t@cherrypy.expose\n\tdef yieldResource(self):\n\n\t\tcherrypy.response.headers[\"Content-Type\"] = \"text/event-stream\"\n\n\t\tdef content():\n\n\t\t\t# #Server time poooling\n\t\t\t# for pooling in xrange(0, 5):\n\t\t\t# \ttime.sleep(1)\n\n\t\t\treturn \"event: time\\n\" + \"data: \" + str(self.dao.select()) + \"\\n\\n\";\n\n\t\treturn content()\n\n\tyieldResource._cp_config = {'response.stream' : True, 'tools.encode.encoding' : 'utf-8'}\n\nif __name__ == '__main__':\n\n\tdao = CrawlerDAO()\t\n\tdao.reset()\n\t\t\n\tpageroot = Root()\n\n\tconf = {\n\t\t\t'/' : {\n\t\t\t \t'tools.encode.encoding': 'utf-8',\n\t\t\t\t'response.timeout' : 1000000,\n\t\t\t\t'tools.staticdir.root': current_dir\n\t\t\t},\n \n\t\t\t'/feed': {\n\t\t\t\t'tools.staticdir.on' : True,\n\t\t\t\t'tools.staticdir.dir': os.path.join(current_dir, 'public'),\n\t\t\t\t'tools.staticdir.content_types': {\n\t\t\t\t\t'rss' : 'application/json',\n\t\t\t\t\t'atom': 'application/json'\n\t\t\t\t},\n\t\t\t\t\n\t\t\t},\n\n\t\t\t'/scripts' : {\n\t\t\t\t'tools.staticdir.on' : True,\n\t\t\t\t'tools.staticdir.dir': os.path.join(public_dir,'scripts' )\n\t\t\t},\n\n\t\t\t'/styles' : {\n\t\t\t\t'tools.staticdir.on' : True,\n\t\t\t\t'tools.staticdir.dir': os.path.join(public_dir,'styles' )\n\t\t\t},\n\n\t\t\t'/views' : {\n\t\t\t\t'tools.staticdir.on' : True,\n\t\t\t\t'tools.staticdir.dir': os.path.join(public_dir, 'views' )\n\t\t\t}\n\t\t}\n\n\n\n\tcherrypy.quickstart(pageroot, config=conf)\n", "repo_name": "carloshpds/BCC-2s13-PI4-web-crawler", "sub_path": "server-sse.py", "file_name": "server-sse.py", "file_ext": "py", "file_size_in_byte": 2070, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "73", "api": [{"api_name": "os.path.path.dirname", "line_number": 9, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 9, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 9, "usage_type": "name"}, {"api_name": "os.path.path.abspath", "line_number": 9, "usage_type": "call"}, {"api_name": "os.path.path.join", "line_number": 10, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 10, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 10, "usage_type": "name"}, {"api_name": "crawler_dao.CrawlerDAO", "line_number": 15, "usage_type": "call"}, {"api_name": "cherrypy.lib.static.serve_file", "line_number": 20, "usage_type": "call"}, {"api_name": "os.path.path.join", "line_number": 20, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 20, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 20, "usage_type": "name"}, 
{"api_name": "cherrypy.expose", "line_number": 17, "usage_type": "attribute"}, {"api_name": "cherrypy.request", "line_number": 25, "usage_type": "attribute"}, {"api_name": "crawler_dao.CrawlerDAO", "line_number": 27, "usage_type": "call"}, {"api_name": "cherrypy.url", "line_number": 28, "usage_type": "call"}, {"api_name": "crawler.Crawler", "line_number": 30, "usage_type": "call"}, {"api_name": "crawler.run", "line_number": 31, "usage_type": "call"}, {"api_name": "cherrypy.expose", "line_number": 22, "usage_type": "attribute"}, {"api_name": "cherrypy.response", "line_number": 39, "usage_type": "attribute"}, {"api_name": "cherrypy.expose", "line_number": 36, "usage_type": "attribute"}, {"api_name": "crawler_dao.CrawlerDAO", "line_number": 55, "usage_type": "call"}, {"api_name": "os.path.path.join", "line_number": 69, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 69, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 69, "usage_type": "name"}, {"api_name": "os.path.path.join", "line_number": 79, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 79, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 79, "usage_type": "name"}, {"api_name": "os.path.path.join", "line_number": 84, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 84, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 84, "usage_type": "name"}, {"api_name": "os.path.path.join", "line_number": 89, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 89, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 89, "usage_type": "name"}, {"api_name": "cherrypy.quickstart", "line_number": 95, "usage_type": "call"}]}
+{"seq_id": "20893092129", "text": "from collections import deque\ndef bfs():\n cnt = 1\n for i in range(n):\n for j in range(m):\n if not land[i][j] and arr[i][j]:\n land[i][j] = cnt\n cnt += 1\n q = deque([])\n q.append((i,j))\n while q:\n x,y = q.popleft()\n for k in range(4):\n nx = x + dx[k]\n ny = y + dy[k]\n if 0 <= nx < n and 0 <= ny < m and not land[nx][ny] and arr[nx][ny]:\n land[nx][ny] = land[x][y]\n q.append((nx,ny))\n return cnt-1\n\ndef connect(x,y,d):\n move = 1\n target = land[x][y]\n while True:\n x = x + dx[d]\n y = y + dy[d]\n if x < 0 or x >= n or y < 0 or y >= m: return -1,-1\n if land[x][y] == target: return -1,-1\n if land[x][y] == 0: move += 1; continue\n if land[x][y] != target: return move-1,land[x][y]\n\ndef make_graph():\n for x in range(n):\n for y in range(m):\n if land[x][y] != 0:\n for k in range(4):\n a = land[x][y]\n move, b = connect(x,y,k)\n if move <= 1: continue\n edges.append((move,a,b))\n\ndef find(x):\n if parents[x] != x:\n parents[x] = find(parents[x])\n return parents[x]\n\ndef union(a,b):\n a = find(a)\n b = find(b)\n if a < b:\n parents[b] = a\n else:\n parents[a] = b\ndef kruskal():\n ans = 0\n cnt = 0\n for edge in edges:\n move,a,b = edge\n if find(a) != find(b):\n union(a,b)\n ans += move\n cnt += 1\n return ans,cnt\n\ndef go():\n cnt = bfs()\n make_graph()\n edges.sort()\n ans,edge = kruskal()\n if edge == cnt-1:\n return ans\n else:\n return -1\n\ndx = [-1,0,1,0]\ndy = [0,1,0,-1]\nn,m = list(map(int,input().split()))\narr = []\nland = [[0]*m for i in range(n)]\nedges = []\nparents = [0]*10\nfor i in range(10):\n parents[i] = i\nfor i in range(n):\n arr.append(list(map(int,input().split())))\nprint(go())\n\n", "repo_name": "17wook2/Algorithm", "sub_path": "백준/17472.py", "file_name": "17472.py", "file_ext": "py", "file_size_in_byte": 2126, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "73", "api": [{"api_name": "collections.deque", "line_number": 9, "usage_type": "call"}]}
+{"seq_id": "72050947117", "text": "from django.urls import path\nfrom peerToPeerPayment import views\n\nurlpatterns = [\n path('add_user/', views.create_user_view, name=\"add_user\"),\n path('deposit/', views.deposit, name=\"deposit\"),\n path('withdraw/', views.withdraw, name=\"withdraw\"),\n path('transfer/', views.transfer, name=\"transfer\"),\n path('check_balance/', views.check_balance, name=\"check_balance\"),\n]\n", "repo_name": "esiebomaj/wafi-test", "sub_path": "peerToPeerPayment/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 398, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "73", "api": [{"api_name": "django.urls.path", "line_number": 5, "usage_type": "call"}, {"api_name": "peerToPeerPayment.views.create_user_view", "line_number": 5, "usage_type": "attribute"}, {"api_name": "peerToPeerPayment.views", "line_number": 5, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 6, "usage_type": "call"}, {"api_name": "peerToPeerPayment.views.deposit", "line_number": 6, "usage_type": "attribute"}, {"api_name": "peerToPeerPayment.views", "line_number": 6, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 7, "usage_type": "call"}, {"api_name": "peerToPeerPayment.views.withdraw", "line_number": 7, "usage_type": "attribute"}, {"api_name": "peerToPeerPayment.views", "line_number": 7, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 8, "usage_type": "call"}, {"api_name": "peerToPeerPayment.views.transfer", "line_number": 8, "usage_type": "attribute"}, {"api_name": "peerToPeerPayment.views", "line_number": 8, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 9, "usage_type": "call"}, {"api_name": "peerToPeerPayment.views.check_balance", "line_number": 9, "usage_type": "attribute"}, {"api_name": "peerToPeerPayment.views", "line_number": 9, "usage_type": "name"}]}
+{"seq_id": "41708852934", "text": "import streamlit as st\nimport plotly_express as px\n\nfrom api_calls import get_combined_manager_history\n \nmanagers_dict = {\n \"Chistian\": 1302722,\n \"Hans-Martin\": 2584139,\n \"Andreas\": 4306388,\n}\n\ndf = get_combined_manager_history(managers_dict=managers_dict)\n\ndf = df.drop(columns=[\"rank_sort\"])\ndf[\"value\"] = df[\"value\"]/10\ndf[\"bank\"] = df[\"bank\"]/10\n\ndf = df.rename(columns={\n \"event\":\"gameweek\",\n \"rank\":\"gw_rank\",\n \"event_transfers\":\"transfers\",\n \"event_transfers_cost\":\"transfer_cost\",\n \"value\":\"team_value\",\n \"bank\":\"money_in_bank\",\n \"points_on_bench\":\"points_benched\"\n })\n\n\ndf[\"total_points_benched\"] = df.groupby([\"manager\"])[\"points_benched\"].transform(\"cumsum\")\ndf[\"total_transfers\"] = df.groupby([\"manager\"])[\"transfers\"].transform(\"cumsum\")\ndf[\"total_transfer_cost\"] = df.groupby([\"manager\"])[\"transfer_cost\"].transform(\"cumsum\")\n\n\nst.header(\"Interaktiv graf for FPL 2022/23\")\nst.text(\"Fra drop down menuen kan du vælge mellem en række variable, der beskriver \\nudviklingen henover sæsonen.\")\n\ny_vals = [col for col in df.columns if col != \"gameweek\"]\ny_axis_val = st.selectbox(\"Vælg variabel til y-aksen:\", options=y_vals)\n\n\nplot = px.line(\n df, \n x=\"gameweek\", \n y=y_axis_val,\n color=\"manager\",\n markers=True\n )\n\nreverse_vals = [\"gw_rank\",\"rank_sort\",\"overall_rank\"]\nif y_axis_val in reverse_vals:\n plot.update_yaxes(\n # autorange=\"reversed\",\n range=[10880000,0])\n\nplot.update_layout(\n xaxis = dict(\n tickmode = 'linear',\n tick0 = 1,\n dtick = 1\n )\n)\n\nst.plotly_chart(plot)", "repo_name": "Chr2507/fpl", "sub_path": "fpl/app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 1586, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "83", "api": [{"api_name": "api_calls.get_combined_manager_history", "line_number": 12, "usage_type": "call"}, {"api_name": "streamlit.header", "line_number": 34, "usage_type": "call"}, {"api_name": "streamlit.text", "line_number": 35, "usage_type": "call"}, {"api_name": "streamlit.selectbox", "line_number": 38, "usage_type": "call"}, {"api_name": "plotly_express.line", "line_number": 41, "usage_type": "call"}, {"api_name": "streamlit.plotly_chart", "line_number": 63, "usage_type": "call"}]}
+{"seq_id": "29711412578", "text": "from enum import Enum\n\nclass RequestResponse:\n def __init__(self, page_info, results):\n self.page_info = page_info\n self.results = results\n\nclass PageInfo:\n def __init__(self, page, pages, results, page_size):\n self.page = page\n self.pages = pages\n self.results = results\n self.page_size = page_size\n\nclass UserResponse:\n def __init__(self, id, email, display_name):\n self.id = id\n self.email = email\n self.display_name = display_name\n\nclass MediaResponse:\n def __init__(self, id, external_service_id, external_service_id_4k, rating_key, status, media_type, created_at, updated_at):\n self.id = id\n self.external_service_id = external_service_id\n self.external_service_id_4k = external_service_id_4k\n self.rating_key = rating_key\n self.status = status\n self.media_type = media_type\n self.created_at = created_at\n self.updated_at = updated_at\n\nclass MediaStatus(Enum):\n UNKNOWN = 1\n PENDING = 2\n PROCESSING = 3\n PARTIALLY_AVAILABLE = 4\n AVAILABLE = 5\n\nclass MediaRequestResponse:\n def __init__(self, id, media, created_at, updated_at, requested_by):\n self.id = id\n self.media = media\n self.created_at = created_at\n self.updated_at = updated_at\n self.requested_by = requested_by\n", "repo_name": "justbest23/media-cleaner-python-broken", "sub_path": "src/overseer/overseerr_responses.py", "file_name": "overseerr_responses.py", "file_ext": "py", "file_size_in_byte": 1360, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "83", "api": [{"api_name": "enum.Enum", "line_number": 32, "usage_type": "name"}]}
+{"seq_id": "73482598992", "text": "import functools\r\n\"\"\"def dec(func):\r\n @functools.wraps(func)\r\n def do_twice(name, *args, **kwargs):\r\n # do someting before\r\n func(name)\r\n func(name)\r\n #do somethng after\r\n # return value\r\n return do_twice\r\n\r\n@dec\r\ndef say_hi(name):\r\n print(\"hi {0}\".format(name))\r\n # return value\"\"\"\r\n\r\nimport time\r\n\r\ndef timer(func):\r\n \"\"\"Print the runtime of the decorated function\"\"\"\r\n @functools.wraps(func)\r\n def wrapper_timer(*args, **kwargs):\r\n start_time = time.perf_counter() # 1\r\n value = func(*args, **kwargs)\r\n end_time = time.perf_counter() # 2\r\n run_time = end_time - start_time # 3\r\n print(f\"Finished {func.__name__!r} in {run_time:.4f} secs\")\r\n return value\r\n return wrapper_timer\r\n\r\n@timer\r\ndef waste_some_time(num_times):\r\n for _ in range(num_times):\r\n x = sum([i**2 for i in range(10001)])\r\n return x\r\n\r\nif __name__ == \"__main__\":\r\n #print(say_hi.__name__)\r\n t = waste_some_time(10)\r\n print(t)\r\n print(sum([i**2 for i in range(10001)]))\r\n", "repo_name": "zieglershai/edit_songs", "sub_path": "test.py", "file_name": "test.py", "file_ext": "py", "file_size_in_byte": 1080, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "83", "api": [{"api_name": "time.perf_counter", "line_number": 23, "usage_type": "call"}, {"api_name": "time.perf_counter", "line_number": 25, "usage_type": "call"}, {"api_name": "functools.wraps", "line_number": 21, "usage_type": "call"}]}
+{"seq_id": "27546449546", "text": "from rest_framework.generics import CreateAPIView,ListCreateAPIView\nfrom rest_framework.request import Request\nfrom .serializers import RegistroUsuarioSerializer, RegistroSerializer, MostrarFigurasSerializer\nfrom rest_framework.response import Response\nfrom rest_framework import status\nfrom .enviar_correos import enviar_correo_validacion\nfrom rest_framework.permissions import AllowAny,IsAuthenticated,IsAdminUser\nfrom .permisos import PermisoPersonalizado, EsAdministrador\nfrom .models import Registro\n\nclass RegistroUsuarioView(CreateAPIView):\n serializer_class = RegistroUsuarioSerializer\n permission_classes = [EsAdministrador]\n\n def post(self, request: Request):\n data = self.serializer_class(data=request.data)\n data.is_valid(raise_exception=True)\n data.save()\n\n print(enviar_correo_validacion(data.data.get('email')))\n\n return Response(data={\n 'message': 'Usuario registrado correctamente',\n 'content': ''\n }, status=status.HTTP_201_CREATED)\n\nclass RegistroFiguritasView(ListCreateAPIView):\n #comentarios\n permission_classes = [PermisoPersonalizado]\n queryset = Registro.objects.all()\n serializer_class = RegistroSerializer\n\n def get(self, request):\n id_usuario = request.user.id\n\n registros = self.get_queryset().filter(usuario = id_usuario).all()\n print(registros)\n\n # utilizar el serializador para convertir los registros a informacion leible\n return Response(data={\n 'message': 'Tu coleccion es: ',\n 'content': MostrarFigurasSerializer(instance=registros, many=True).data\n })\n\n def post(self, request):\n id_usuario = request.user.id\n # agregamos el bodyactual a la llave del usuario\n # usamos el ** para hacer la destructuracion o sacar el contenido del\n # diccionario y agregamos uno nuevo\n data = {**request.data, **{'usuario': id_usuario}}\n print(data)\n registroSerializado = self.serializer_class(data=data)\n\n registroSerializado.is_valid(raise_exception=True)\n nuevoRegistro = registroSerializado.save()\n\n return Response(data={\n 'message': 'Registro creado correctamente',\n 'content': self.serializer_class(instance=nuevoRegistro).data\n })", "repo_name": "SalvadorCT/Album_Mundial", "sub_path": "gestion/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 2313, "program_lang": "python", "lang": "es", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "83", "api": [{"api_name": "rest_framework.generics.CreateAPIView", "line_number": 11, "usage_type": "name"}, {"api_name": "serializers.RegistroUsuarioSerializer", "line_number": 12, "usage_type": "name"}, {"api_name": "permisos.EsAdministrador", "line_number": 13, "usage_type": "name"}, {"api_name": "rest_framework.request.Request", "line_number": 15, "usage_type": "name"}, {"api_name": "enviar_correos.enviar_correo_validacion", "line_number": 20, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 22, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_201_CREATED", "line_number": 25, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 25, "usage_type": "name"}, {"api_name": "rest_framework.generics.ListCreateAPIView", "line_number": 27, "usage_type": "name"}, {"api_name": "permisos.PermisoPersonalizado", "line_number": 29, "usage_type": "name"}, {"api_name": "models.Registro.objects.all", "line_number": 30, "usage_type": "call"}, {"api_name": "models.Registro.objects", "line_number": 30, "usage_type": "attribute"}, {"api_name": "models.Registro", 
"line_number": 30, "usage_type": "name"}, {"api_name": "serializers.RegistroSerializer", "line_number": 31, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 40, "usage_type": "call"}, {"api_name": "serializers.MostrarFigurasSerializer", "line_number": 42, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 57, "usage_type": "call"}]}
+{"seq_id": "16271675003", "text": "\nfrom pyAudioAnalysis import audioBasicIO\nfrom pyAudioAnalysis import audioFeatureExtraction\nfrom pydub import AudioSegment\nimport matplotlib.pyplot as plt\nimport scipy\nimport numpy as np\nimport pandas as pd\nfrom convert_time_overlapping_df_into_time_non_overapping_df import make_non_time_overlapping_df_from_time_overlapping_df\nimport os\nfrom make_df_et_matrix import df_et_matrix_one_file\n\n\ndef divide_intvl_into_subtnvl_of_given_size(ini_pt, fin_pt, len_subintvl):\n l=[]\n while ini_pt <= fin_pt:\n l.append(ini_pt)\n ini_pt=ini_pt+len_subintvl\n l=np.array(l)\n return l\n\n\n'''\ndef chunk_sound(folder,mp3_file, chunk_dur,\\\n dir_name=\"/home/susovan/Documents/music_detection/muspeak_mirex2015/music_chunks\"):\n \n\n :param folder: str\n :param mp3_file: .mp3 file\n :param chunk_dur: scalar, the chunk duration to segment the audio\n :return: ???\n \n full_name_mp3file=os.path.join(folder,mp3_file)\n sound = AudioSegment.from_file(full_name_mp3file, format=\"mp3\")\n #sound=sound[:20*1000]\n \"If the wavfile is too large, there can be a memory issue, raising the error\" \\\n \" pydub.exceptions.CouldntDecodeError: Couldn't find data header in wav data\"\n #dir_name=\"/home/susovan/Documents/music_detection/muspeak_mirex2015/music_chunks\"\n for i, chunk in enumerate(sound[::chunk_dur*1000]):\n with open(os.path.join(dir_name,\"sound-%s.mp3\" % i) , \"wb\") as f:\n chunk.export(f, format=\"mp3\")\n return chunk\n \n '''\n\n\ndef make_time_chunks_from_df_of_start_end_times(df_nonoverlapping, chunk_dur):\n '''\n Below:\n df=df_et_matrix_one_file(folder,file1)[0]\n df_nonoverlapping=make_non_time_overlapping_df_from_time_overlapping_df(df)\n\n :param df: pd dataframe containing the array of start and end times [S,E] of music and speech with annotations (m, s)\n chunk_dur is in seconds,\n :return: list of all chunks of of a certain chunk duration chunk_dur for each S and E\n lst is the list of all chunks of duration chunk_dur if the audio seglent is at least chun_dur sec. 
long\n lst2 is the list of all corresponding annotations (music or speech)\n '''\n\n x = df_nonoverlapping.values[:, 0:2]# x is the matrix of start and endtimes so that music and speech don't overlap\n x = x.astype(float)\n y = df_nonoverlapping.values[:, 2] # y is the corresponding array of the corresponding annotations (m, s)\n #y=np.array([y]).T #to make y vertical\n lst=[] #list of np arrays of starttimes and endtimes of length chunk_dur, note\\\n # that x.shape[0] is the number of annotated audio segments in the initial audio file\n lst2= [] #stores the annotations 'm or s) for the above\n count_chunk_list=[]\n for i in range(x.shape[0]):#for i-th row of x\n if x[i,1]-x[i,0] >= chunk_dur:\n #ct_chunks=int((x[i,1]-x[i,0])/chunk_dur) #counts number of chunks\n #count_chunk_list.append(ct_chunks)\n #ct_chunks = round((x[i, 1] - x[i, 0]) / chunk_dur) # counts number of chunks\n #lst.append( np.linspace( x[i,0], x[i,1], ct_chunks ) )\n lst.append(divide_intvl_into_subtnvl_of_given_size(ini_pt=x[i,0], fin_pt=x[i,1], len_subintvl=chunk_dur))\n tmp = y[i]\n #lst2[i].append( tmp for k in range(ct_chunks))\n ct_chunks= len( divide_intvl_into_subtnvl_of_given_size(ini_pt=x[i,0], fin_pt=x[i,1], len_subintvl=chunk_dur) )\n lst2.append( [tmp for k in range(ct_chunks)] )\n\n list_start_endtime_for_chunks= []\n list_annotation_for_chunks=[]\n #df_start_endtime_chunks=pd.DataFrame({''})\n for i in range(len(lst)):\n if lst[i]!= []:\n list_start_endtime_for_chunks.append(lst[i])\n list_annotation_for_chunks.append(lst2[i])\n df_start_endtime_annotation_chunks = pd.DataFrame({'list_start_endtime_for_chunks': list_start_endtime_for_chunks, 'list_annotation_for_chunks': list_annotation_for_chunks})\n return df_start_endtime_annotation_chunks\n\ndef make_sound_chunks_from_df_start_endtime_annotation_chunks(df_start_endtime_annotation_chunks, audio, \\\n dir_name = \"/home/susovan/Documents/music_detection/muspeak_mirex2015/music_chunks\"):\n '''\n\n\n :param df_start_endtime_annotation_chunks: defined above, it's a df that contains 1) list a of np arrays that stores\n the vectors of chunk_dur long sound chunk from an audio file, and 2) list of corr. 
 annotations, m or s.\n    audio: initial audio file, ideally would have 1 channel\n    :param dir_name: str, name of dir where audio files will be saved\n    :return: corresponding df of audio segments (using pydub) and corresponding annotations\n    '''\n\n    audio = AudioSegment.from_file(audio, format=\"mp3\").set_channels(1)\n    df_col_names=df_start_endtime_annotation_chunks.columns.values #gives the names of the columns\n    list_start_endtime_for_chunks=df_start_endtime_annotation_chunks[df_col_names[0]].tolist()\n    list_annotation_for_chunks=df_start_endtime_annotation_chunks[df_col_names[1]].tolist()\n    audio_seg_lst=[] #contains the audio seg objects, without being exported\n    audio_annotation_lst=[]\n    chunk_list = [] #contains the corresponding audio segments in audio seg_list after being exported as .mp3/wav\n    ctr=-1\n    for i in range(len(list_start_endtime_for_chunks)): #for the i-th array of annotated time intervals of the original file\n        for j in range(len(list_start_endtime_for_chunks[i])-1): #for the j-th time chunk of the i-th annotated time interval\n            start_tm_chunk_in_ms = list_start_endtime_for_chunks[i][j]*1000\n            end_tm_chunk_in_ms = list_start_endtime_for_chunks[i][j+1]*1000 #times in millisec.\n            audio_seg = audio[start_tm_chunk_in_ms:end_tm_chunk_in_ms]\n            audio_seg_lst.append(audio_seg)\n            audio_annotation_lst.append(list_annotation_for_chunks[i][j])\n            #audio_seg_handle = audio_seg.export(os.path.join(dir_name,\"sound-%s.mp3\" % j), format=\"mp3\")\n            ctr = ctr + 1\n            audio_seg_handle = audio_seg.export(os.path.join(dir_name, \"sound-%s.wav\" % ctr), format=\"wav\", parameters=[\"-ac\", \"1\"])\n            chunk_list.append(os.path.join(dir_name,\"sound-%s.wav\" % ctr))\n\n\n    return audio_seg_lst, audio_annotation_lst, chunk_list\n\n\n\n\n\n\n'''\n#TEST the above fn:\nfolder=\"/home/susovan/Documents/music_detection/muspeak_mirex2015/muspeak-mirex2015-detection-examples\"\n#wav_file=\"ConscinciasParalelasN3-OsSentidosOSentirEAsNormasParte318-10-1994.mp3.wav\"\n#mp3_file=\"ConscinciasParalelasN3-OsSentidosOSentirEAsNormasParte318-10-1994.mp3\"\nmp3_file=\"ConscinciasParalelasN3-OsSentidosOSentirEAsNormasParte318-10-1994.mp3\"\nchunk_dur=10\nchunk=chunk_sound(folder,mp3_file,chunk_dur,dir_name=folder)\nprint( \"\\n chunks are \\n\")\n\n'''\n\n'''\n\n#TEST the fn make_time_chunks_from_df_of_start_end_times:\n\nfolder='/home/susovan/Documents/music_detection/muspeak_mirex2015/muspeak-mirex2015-detection-examples'\nfile1='/ConscinciasParalelasN3-OsSentidosOSentirEAsNormasParte318-10-1994.csv'\ndf=df_et_matrix_one_file(folder,file1)[0]\ndf_nonoverlapping=make_non_time_overlapping_df_from_time_overlapping_df(df)\ntmp2=make_time_chunks_from_df_of_start_end_times(df_nonoverlapping, chunk_dur=10)\nprint( '\\n The df containing segments of chunk_dur and annotations are below \\n' + str(tmp2) )\n\n'''\n\n\n'''\n#TEST make_sound_chunks_from_df_start_endtime_annotation_chunks\n\nfolder='/home/susovan/Documents/music_detection/muspeak_mirex2015/muspeak-mirex2015-detection-examples'\n#file1='ConscinciasParalelasN3-OsSentidosOSentirEAsNormasParte318-10-1994.csv'\n#file1_mp3= 
'/ConscinciasParalelasN3-OsSentidosOSentirEAsNormasParte318-10-1994.mp3'\n\n\nfile2='ConscinciasParalelasN7-OsSentidosOSentirEAsNormasParte715-1-1994.csv'\nfile2_mp3='ConscinciasParalelasN7-OsSentidosOSentirEAsNormasParte715-1-1994.mp3'\n\n\n#file3='ConscinciasParalelasN7-OsSentidosOSentirEAsNormasParte715-1-1994.csv'\n#file3_mp3='ConscinciasParalelasN7-OsSentidosOSentirEAsNormasParte715-1-1994.mp3'\n\n\ndf=df_et_matrix_one_file(folder,file2)[0]\ndf_nonoverlapping=make_non_time_overlapping_df_from_time_overlapping_df(df)\nprint(\"df_nonoverlapping is \\n\" + str(df_nonoverlapping))\ndf_start_endtime_annotation_chunks= make_time_chunks_from_df_of_start_end_times(df_nonoverlapping, chunk_dur=10)\n\naudio= os.path.join(folder, file2_mp3)\n#audio= folder + file2_mp3\n#audio = AudioSegment.from_file(audio, format=\"mp3\", chennels=1)\n#audio = AudioSegment.from_file(audio, format=\"wav\", chennels=1)\naudio_seg_list=make_sound_chunks_from_df_start_endtime_annotation_chunks(df_start_endtime_annotation_chunks, audio)[0]\naudio_annotation_list=make_sound_chunks_from_df_start_endtime_annotation_chunks(df_start_endtime_annotation_chunks, audio)[1]\nchunk_list=make_sound_chunks_from_df_start_endtime_annotation_chunks(df_start_endtime_annotation_chunks, audio)[2]\nprint( \"\\n df_start_endtime_annotation_chunks is \\n\" + str(df_start_endtime_annotation_chunks) )\nprint( \"\\n audio_seg_list \\n\" + str(audio_seg_list) )\nprint( \"\\n audio_annotation_list \\n\" + str(audio_annotation_list) )\nprint(\"\\n chunk_list is \\n\" + str(chunk_list) )\n\n'''", "repo_name": "susovan-batvoice/music_detection", "sub_path": "slice_sound.py", "file_name": "slice_sound.py", "file_ext": "py", "file_size_in_byte": 9133, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "83", "api": [{"api_name": "numpy.array", "line_number": 19, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 87, "usage_type": "call"}, {"api_name": "pydub.AudioSegment.from_file", "line_number": 102, "usage_type": "call"}, {"api_name": "pydub.AudioSegment", "line_number": 102, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 119, "usage_type": "call"}, {"api_name": "os.path", "line_number": 119, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 120, "usage_type": "call"}, {"api_name": "os.path", "line_number": 120, "usage_type": "attribute"}]}
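The two functions in slice_sound.py above first split each annotated (start, end) interval into a grid of chunk boundaries and then cut the pydub AudioSegment between consecutive boundaries. A minimal sketch of that two-step idea with the annotation bookkeeping stripped out; "example.mp3" and the helper name fixed_chunks are placeholders, not part of the original module:

import numpy as np
from pydub import AudioSegment

def fixed_chunks(path, chunk_dur=10):
    # Load the audio as mono, as make_sound_chunks_... does with set_channels(1)
    audio = AudioSegment.from_file(path, format="mp3").set_channels(1)
    # Boundary grid in seconds, like divide_intvl_into_subtnvl_of_given_size
    bounds = np.arange(0, len(audio) / 1000, chunk_dur)
    # Consecutive boundaries delimit one chunk; pydub slices in milliseconds
    return [audio[int(s * 1000):int(e * 1000)] for s, e in zip(bounds, bounds[1:])]

chunks = fixed_chunks("example.mp3")  # each element is a chunk_dur-second AudioSegment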
+{"seq_id": "24912998728", "text": "import socket\r\nimport keyboard as kb\r\nimport time\r\n\r\nspecial_keys = {\r\n 'A': 'a', 'B': 'b', 'C': 'c', 'D': 'd', 'E': 'e', 'F': 'f', 'G': 'g', 'H': 'h', 'I': 'i', 'J': 'j',\r\n 'K': 'k', 'L': 'l', 'M': 'm', 'N': 'n', 'O': 'o', 'P': 'p', 'Q': 'q', 'R': 'r', 'S': 's', 'T': 't',\r\n 'U': 'u', 'V': 'v', 'W': 'w', 'X': 'x', 'Y': 'y', 'Z': 'z', '!': '1', '@': '2', '#': '3', '$': '4',\r\n '%': '5', '^': '6', '&': '7', '*': '8', '(': '9', ')': '0', '~': '`', '_': '-', '+': '=', '{': '[',\r\n '}': ']', '|': '\\\\', ':': ';', '\"': \"'\", '<': ',', '>': '.', '?': '/'\r\n}\r\n\r\ndef main():\r\n host = \"192.168.23.1\" # Server IP address\r\n port = 12346 # Server port\r\n\r\n client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n client_socket.connect((host, port))\r\n\r\n print(\"Connected to the server\")\r\n\r\n try:\r\n while True:\r\n data = client_socket.recv(1024)\r\n if not data:\r\n break\r\n else:\r\n is_pressed, key = data.decode().split(\",\")\r\n if is_pressed == \"True\":\r\n if key in special_keys:\r\n kb.press(special_keys[key])\r\n else:\r\n kb.press(key)\r\n\r\n else:\r\n if key in special_keys:\r\n kb.release(special_keys[key])\r\n else:\r\n kb.release(key)\r\n time.sleep(0.01)\r\n\r\n\r\n except KeyboardInterrupt:\r\n pass\r\n\r\n client_socket.close()\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n", "repo_name": "noamkv/pc-remote-controller", "sub_path": "client.py", "file_name": "client.py", "file_ext": "py", "file_size_in_byte": 1576, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "83", "api": [{"api_name": "socket.socket", "line_number": 17, "usage_type": "call"}, {"api_name": "socket.AF_INET", "line_number": 17, "usage_type": "attribute"}, {"api_name": "socket.SOCK_STREAM", "line_number": 17, "usage_type": "attribute"}, {"api_name": "keyboard.press", "line_number": 31, "usage_type": "call"}, {"api_name": "keyboard.press", "line_number": 33, "usage_type": "call"}, {"api_name": "keyboard.release", "line_number": 37, "usage_type": "call"}, {"api_name": "keyboard.release", "line_number": 39, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 40, "usage_type": "call"}]}
+{"seq_id": "25152583384", "text": "\"\"\"\n.\n\"\"\"\nimport numpy as np\nimport numpy.typing as npt\nimport scipy\nfrom lqr import SmoothTrack\n\n\nclass SolverMatrices: # pylint: disable=too-few-public-methods\n \"\"\"\n This class contains the helper matrices that\n will be used to calulate the solver input.\n\n Attributes\n ----------\n matP : npt.NDArray[np.float64]\n The P matrix, matP = [matPXX,matPXY,matPYY]\n matPrime : npt.NDArray[np.float64]\n The prime matrix, matPrime = [xPrime,yPrime]\n matT : npt.NDArray[np.float64]\n The T matrix, matT = [matTC,matTNX,matTNY]\n matQ : npt.NDArray[np.float64]\n The Q matrix, matQ = [matQX,matQY]\n curvPart : npt.NDArray[np.float64]\n The curvature part of the matrix\n \"\"\"\n\n def __init__(self, track: SmoothTrack) -> None:\n self.matP: npt.NDArray[np.float64] = np.array([None, None, None])\n self.matPrime: npt.NDArray[np.float64] = np.array([None, None])\n self.matT: npt.NDArray[np.float64] = np.array([None, None, None])\n self.matQ: npt.NDArray[np.float64] = np.array([None, None])\n self.curvPart: npt.NDArray[np.float64] = np.array(None)\n self.setupMatrices(track)\n\n def setupMatrices(self, track: SmoothTrack) -> None:\n \"\"\"\n This function sets up the matrices for the optimization problem.\n\n Parameters\n ----------\n track : Track\n The track data.\n self : SolverMatrices\n The helper matrices for the optimization problem.\n \"\"\"\n # create extraction matrix for b_i coefficients used in gradient\n extMatB = np.zeros((track.noPoints, track.noSplines * 4), dtype=int)\n for i in range(track.noSplines):\n extMatB[i, i * 4 + 1] = 1 # 1 * b_ix = E_x * x\n # create extraction matrix -> only c_i coefficients of the\n # solved linear equation system are needed for curvature information\n extMatC = np.zeros((track.noPoints, track.noSplines * 4), dtype=int)\n\n for i in range(track.noSplines):\n extMatC[i, i * 4 + 2] = 2 # 2 * c_ix = D_x * x\n # ax=b --> (track.trackCoeffs.alpha)*(T_C) = (extMatC)\n tempTC = scipy.sparse.linalg.spsolve(track.trackCoeffs.alpha.T, extMatC.T)\n self.matT[0] = tempTC.T\n # set up matMX and matMY matrices\n matMX = np.zeros((track.noSplines * 4, track.noPoints))\n matMY = np.zeros((track.noSplines * 4, track.noPoints))\n\n for i in range(track.noSplines):\n j = i * 4\n\n if i < track.noPoints - 1:\n matMX[j, i] = track.trackCoeffs.normVectors[i, 0]\n matMX[j + 1, i + 1] = track.trackCoeffs.normVectors[i + 1, 0]\n\n matMY[j, i] = track.trackCoeffs.normVectors[i, 1]\n matMY[j + 1, i + 1] = track.trackCoeffs.normVectors[i + 1, 1]\n else:\n matMX[j, i] = track.trackCoeffs.normVectors[i, 0]\n matMX[j + 1, 0] = track.trackCoeffs.normVectors[0, 0] # close spline\n\n matMY[j, i] = track.trackCoeffs.normVectors[i, 1]\n matMY[j + 1, 0] = track.trackCoeffs.normVectors[0, 1]\n\n # set up self.matQ[0] and self.matQ[1] matrices including the point coordinate information\n self.matQ[0] = np.zeros((track.noSplines * 4, 1))\n self.matQ[1] = np.zeros((track.noSplines * 4, 1))\n\n for i in range(track.noSplines):\n j = i * 4\n\n if i < track.noPoints - 1:\n self.matQ[0][j, 0] = track.path[i, 0]\n self.matQ[0][j + 1, 0] = track.path[i + 1, 0]\n\n self.matQ[1][j, 0] = track.path[i, 1]\n self.matQ[1][j + 1, 0] = track.path[i + 1, 1]\n else:\n self.matQ[0][j, 0] = track.path[i, 0]\n self.matQ[0][j + 1, 0] = track.path[0, 0]\n\n self.matQ[1][j, 0] = track.path[i, 1]\n self.matQ[1][j + 1, 0] = track.path[0, 1]\n\n # set up self.matP[0], self.matP[1], self.matP[2] matrices\n tempTB = 
scipy.sparse.linalg.spsolve(track.trackCoeffs.alpha.T, extMatB.T)\n matTB = tempTB.T\n self.matPrime = np.array([None, None, None, None, None])\n self.matPrime[0] = np.eye(track.noPoints, track.noPoints) * np.matmul(matTB, self.matQ[0])\n self.matPrime[1] = np.eye(track.noPoints, track.noPoints) * np.matmul(matTB, self.matQ[1])\n\n self.matPrime[2] = np.power(self.matPrime[0], 2)\n self.matPrime[3] = np.power(self.matPrime[1], 2)\n self.matPrime[4] = -2 * np.matmul(self.matPrime[0], self.matPrime[1])\n curvDen = np.power(\n self.matPrime[2] + self.matPrime[3], 1.5\n ) # calculate curvature denominator\n self.curvPart = np.divide(\n 1, curvDen, out=np.zeros_like(curvDen), where=curvDen != 0\n ) # divide where not zero (diag elements)\n curvPartSq = np.power(self.curvPart, 2)\n self.matP[0] = np.matmul(curvPartSq, self.matPrime[3])\n self.matP[2] = np.matmul(curvPartSq, self.matPrime[2])\n self.matP[1] = np.matmul(curvPartSq, self.matPrime[4])\n\n # SET UP FINAL MATRICES FOR SOLVER\n self.matT[1] = np.matmul(self.matT[0], matMX)\n self.matT[2] = np.matmul(self.matT[0], matMY)\n", "repo_name": "ahmedsalahacc/fs-system", "sub_path": "navigation/lqr/src/lqr/solverMatrices.py", "file_name": "solverMatrices.py", "file_ext": "py", "file_size_in_byte": 5226, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "73", "api": [{"api_name": "lqr.SmoothTrack", "line_number": 29, "usage_type": "name"}, {"api_name": "numpy.typing.NDArray", "line_number": 30, "usage_type": "attribute"}, {"api_name": "numpy.typing", "line_number": 30, "usage_type": "name"}, {"api_name": "numpy.float64", "line_number": 30, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 30, "usage_type": "call"}, {"api_name": "numpy.typing.NDArray", "line_number": 31, "usage_type": "attribute"}, {"api_name": "numpy.typing", "line_number": 31, "usage_type": "name"}, {"api_name": "numpy.float64", "line_number": 31, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 31, "usage_type": "call"}, {"api_name": "numpy.typing.NDArray", "line_number": 32, "usage_type": "attribute"}, {"api_name": "numpy.typing", "line_number": 32, "usage_type": "name"}, {"api_name": "numpy.float64", "line_number": 32, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 32, "usage_type": "call"}, {"api_name": "numpy.typing.NDArray", "line_number": 33, "usage_type": "attribute"}, {"api_name": "numpy.typing", "line_number": 33, "usage_type": "name"}, {"api_name": "numpy.float64", "line_number": 33, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 33, "usage_type": "call"}, {"api_name": "numpy.typing.NDArray", "line_number": 34, "usage_type": "attribute"}, {"api_name": "numpy.typing", "line_number": 34, "usage_type": "name"}, {"api_name": "numpy.float64", "line_number": 34, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 34, "usage_type": "call"}, {"api_name": "lqr.SmoothTrack", "line_number": 37, "usage_type": "name"}, {"api_name": "numpy.zeros", "line_number": 49, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 54, "usage_type": "call"}, {"api_name": "scipy.sparse.linalg.spsolve", "line_number": 59, "usage_type": "call"}, {"api_name": "scipy.sparse", "line_number": 59, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 62, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 63, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 82, "usage_type": 
"call"}, {"api_name": "numpy.zeros", "line_number": 83, "usage_type": "call"}, {"api_name": "scipy.sparse.linalg.spsolve", "line_number": 102, "usage_type": "call"}, {"api_name": "scipy.sparse", "line_number": 102, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 104, "usage_type": "call"}, {"api_name": "numpy.eye", "line_number": 105, "usage_type": "call"}, {"api_name": "numpy.matmul", "line_number": 105, "usage_type": "call"}, {"api_name": "numpy.eye", "line_number": 106, "usage_type": "call"}, {"api_name": "numpy.matmul", "line_number": 106, "usage_type": "call"}, {"api_name": "numpy.power", "line_number": 108, "usage_type": "call"}, {"api_name": "numpy.power", "line_number": 109, "usage_type": "call"}, {"api_name": "numpy.matmul", "line_number": 110, "usage_type": "call"}, {"api_name": "numpy.power", "line_number": 111, "usage_type": "call"}, {"api_name": "numpy.divide", "line_number": 114, "usage_type": "call"}, {"api_name": "numpy.zeros_like", "line_number": 115, "usage_type": "call"}, {"api_name": "numpy.power", "line_number": 117, "usage_type": "call"}, {"api_name": "numpy.matmul", "line_number": 118, "usage_type": "call"}, {"api_name": "numpy.matmul", "line_number": 119, "usage_type": "call"}, {"api_name": "numpy.matmul", "line_number": 120, "usage_type": "call"}, {"api_name": "numpy.matmul", "line_number": 123, "usage_type": "call"}, {"api_name": "numpy.matmul", "line_number": 124, "usage_type": "call"}]}
+{"seq_id": "40572424771", "text": "import json\nimport urllib.request\n\nfrom soynlp.utils import DoublespaceLineCorpus\nfrom soynlp.noun import LRNounExtractor_v2\nfrom soynlp.word import WordExtractor\nfrom soynlp.tokenizer import LTokenizer\nfrom krwordrank.word import KRWordRank\n\n\ndef search_in_naver(search_words, field='blog'):\n valid_fields = [\n 'news',\n 'blog',\n 'shop',\n 'movie',\n 'image',\n 'doc',\n 'book',\n 'cafearticle'\n ]\n\n # If requested field not in valid fields\n if field not in valid_fields:\n return 400, 'Not valid field'\n\n search_words = urllib.parse.quote(search_words)\n request_url = 'https://openapi.naver.com/v1/search/{}?query={}&display=100'.format(field, search_words)\n\n # Read id and secret\n with open('text_analysis/config.json', 'r', encoding='UTF-8') as config_file:\n config = json.loads(config_file.readline())\n id = config['id']\n secret = config['secret']\n\n # Request\n request = urllib.request.Request(request_url)\n request.add_header('X-Naver-Client-Id', id)\n request.add_header('X-Naver-Client-Secret', secret)\n response = urllib.request.urlopen(request)\n\n # Response\n rescode = response.getcode()\n if (rescode == 200):\n response_body = json.loads(response.read().decode('utf-8'))\n return 200, response_body\n else:\n return rescode, 'Error from naver'\n\n\ndef get_tokenizer(sentences):\n word_extractor = WordExtractor()\n\n word_extractor.train(sentences)\n words = word_extractor.extract()\n cohesion_score = {word:score.cohesion_forward for word, score in words.items()}\n\n noun_extractor = LRNounExtractor_v2()\n nouns = noun_extractor.train_extract(sentences) # list of str like\n\n noun_scores = {noun:score.score for noun, score in nouns.items()}\n combined_scores = {noun:score + cohesion_score.get(noun, 0)\n for noun, score in noun_scores.items()}\n\n tokenizer = LTokenizer(scores=combined_scores)\n\n return noun_extractor, nouns, tokenizer\n\n\ndef get_tokenized_nouns(sentences, noun_extractor, nouns, tokenizer):\n tokenized_sentences = []\n for sent in sentences:\n tokenized_sentences.append(tokenizer.tokenize(sent))\n compound_nouns = {}\n for compound_noun, sub_nouns in noun_extractor._compounds_components.items():\n compound_nouns[compound_noun] = list(sub_nouns)\n\n tokenized_nouns = []\n for sent in tokenized_sentences:\n temp_sents = []\n for word in sent:\n if (word in nouns) and (word not in compound_nouns) and (len(word) > 1):\n temp_sents.append(word)\n elif word in compound_nouns:\n temp_sents.extend(compound_nouns[word])\n tokenized_nouns.append(temp_sents)\n\n tokenized_nouns = [nouns for nouns in tokenized_nouns if len(nouns) > 3]\n\n return tokenized_nouns\n\n\ndef get_2gram(sentences):\n ngram_sentences = []\n\n for sentence in sentences:\n ngrams = []\n for idx in range(len(sentence) - 1):\n ngrams.append('{}_{}'.format(sentence[idx], sentence[idx + 1]))\n ngram_sentences.append(ngrams)\n\n return ngram_sentences\n\n\ndef wordrank(words):\n min_count = 3 # 단어의 최소 출현 빈도수 (그래프 생성 시)\n max_length = 12 # 단어의 최대 길이\n wordrank_extractor = KRWordRank(min_count=min_count, max_length=max_length)\n\n beta = 0.8 # PageRank의 decaying factor beta\n max_iter = 50\n keywords, rank, graph = wordrank_extractor.extract(words, beta, max_iter)\n return keywords\n\n\ndef filter_two_gram(keywords, n):\n ret = []\n for keyword, score in keywords.items():\n if keyword[-1] != '_' and keyword.find('_') != -1:\n ret.append((keyword.replace('_', ' '), score))\n return ret[:n]\n\n\ndef filter_one_gram(keywords, n):\n ret = [(keyword, score) for 
keyword, score in keywords.items()]\n return ret[:n]\n\n\ndef get_keywords(title, sentences, n):\n # n means how many keywords do you want\n # Get nouns and tokenizer\n noun_extractor, nouns, tokenizer = get_tokenizer(sentences)\n\n # Get nouns from title\n keywords_in_title = []\n for noun in nouns.keys():\n if noun in title and len(noun) != 1:\n keywords_in_title.append(noun)\n\n # Get nouns from content\n tokenized_nouns = get_tokenized_nouns(sentences, noun_extractor, nouns, tokenizer)\n one_gram_nouns = [' '.join(nouns) for nouns in tokenized_nouns]\n two_gram_nouns = get_2gram(tokenized_nouns)\n two_gram_nouns = [' '.join(nouns) for nouns in two_gram_nouns]\n\n # Get keywords\n one_gram_keywords = filter_one_gram(wordrank(one_gram_nouns), int(n * 0.3))\n two_gram_keywords = filter_two_gram(wordrank(two_gram_nouns), n - int(n * 0.3))\n keywords_in_content = [keywordInfo[0] for keywordInfo in one_gram_keywords + two_gram_keywords]\n\n return keywords_in_content + keywords_in_title\n", "repo_name": "c0510gy/InfoSearcher", "sub_path": "backend/infoSearcherServer/text_analysis/apis.py", "file_name": "apis.py", "file_ext": "py", "file_size_in_byte": 4877, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "83", "api": [{"api_name": "urllib.request.parse.quote", "line_number": 27, "usage_type": "call"}, {"api_name": "urllib.request.parse", "line_number": 27, "usage_type": "attribute"}, {"api_name": "urllib.request", "line_number": 27, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 32, "usage_type": "call"}, {"api_name": "urllib.request.request.Request", "line_number": 37, "usage_type": "call"}, {"api_name": "urllib.request.request", "line_number": 37, "usage_type": "attribute"}, {"api_name": "urllib.request", "line_number": 37, "usage_type": "name"}, {"api_name": "urllib.request.request.urlopen", "line_number": 40, "usage_type": "call"}, {"api_name": "urllib.request.request", "line_number": 40, "usage_type": "attribute"}, {"api_name": "urllib.request", "line_number": 40, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 45, "usage_type": "call"}, {"api_name": "soynlp.word.WordExtractor", "line_number": 52, "usage_type": "call"}, {"api_name": "soynlp.noun.LRNounExtractor_v2", "line_number": 58, "usage_type": "call"}, {"api_name": "soynlp.tokenizer.LTokenizer", "line_number": 65, "usage_type": "call"}, {"api_name": "krwordrank.word.KRWordRank", "line_number": 108, "usage_type": "call"}]}
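The underscore convention produced by get_2gram above is what filter_two_gram later relies on when it turns keywords back into two-word phrases. A tiny self-contained illustration of the same transformation (the token lists are made up):

def two_grams(sentence):
    # n tokens -> n-1 bigrams joined by "_", mirroring get_2gram above
    return ["{}_{}".format(a, b) for a, b in zip(sentence, sentence[1:])]

print(two_grams(["deep", "learning", "model"]))
# ['deep_learning', 'learning_model']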
+{"seq_id": "23436237144", "text": "from PyQt5.QtCore import Qt, QSize, QTimer\nfrom PyQt5.QtWidgets import QApplication, QWidget, QTabWidget, QSlider, QLabel, QPushButton, QHBoxLayout, QFormLayout, QGridLayout\nfrom PyQt5.QtGui import QImage, QPixmap\nimport cv2\nimport sys\n\nclass MainApp(QTabWidget):\n\n def __init__(self):\n QTabWidget.__init__(self)\n self.acquisition_tab = QWidget()\n self.tab2 = QWidget()\n self.tab3 = QWidget()\n self.video_size = QSize(1280, 720)\n self.addTab(self.acquisition_tab,\"Acquisition\")\n self.addTab(self.tab2,\"Tab 2\")\n self.addTab(self.tab3,\"Tab 3\")\n self.acquisition_tab_UI()\n self.setWindowTitle(\"tab demo\")\n\n def acquisition_tab_UI(self):\n \"\"\"Initialize widgets.\n \"\"\"\n self.image_label = QLabel()\n self.image_label.setFixedSize(self.video_size)\n\n start_preview_button = QPushButton(\"Start preview\")\n start_preview_button.clicked.connect(self.setup_camera)\n\n stop_preview_button = QPushButton(\"Stop preview\")\n stop_preview_button.clicked.connect(self.stop_preview)\n\n quit_button = QPushButton(\"Quit\")\n quit_button.clicked.connect(self.close)\n\n save_button = QPushButton(\"Save\")\n save_button.clicked.connect(self.saveVideo)\n\n brightness_label = QLabel(\"Brightness\")\n brightness_slider = QSlider(Qt.Horizontal, self)\n brightness_slider.setFocusPolicy(Qt.NoFocus)\n brightness_slider.valueChanged[int].connect(self.changedBrightnessValue)\n brightness_slider.setMaximum(255)\n\n contrast_label = QLabel(\"Contrast\")\n contrast_slider = QSlider(Qt.Horizontal, self)\n contrast_slider.setFocusPolicy(Qt.NoFocus)\n contrast_slider.valueChanged[int].connect(self.changedContrastValue)\n contrast_slider.setMaximum(255)\n\n saturation_label = QLabel(\"Saturation\")\n saturation_slider = QSlider(Qt.Horizontal, self)\n saturation_slider.setFocusPolicy(Qt.NoFocus)\n saturation_slider.valueChanged[int].connect(self.changedSaturationValue)\n saturation_slider.setMaximum(255)\n\n l2 = QFormLayout();\n l2.addWidget(QLabel());\n l2.addWidget(QLabel());\n l2.addWidget(QLabel());\n l2.addWidget(QLabel());\n l2.addWidget(QLabel());\n l2.addRow(brightness_label, brightness_slider)\n l2.addRow(contrast_label, contrast_slider)\n l2.addRow(saturation_label, saturation_slider)\n\n l3 = QHBoxLayout();\n l3.addWidget(start_preview_button)\n l3.addWidget(stop_preview_button)\n l3.addWidget(save_button)\n l3.addWidget(quit_button)\n\n grid = QGridLayout()\n grid.setSpacing(10)\n grid.addWidget(self.image_label, 0, 0)\n grid.addLayout(l3,1,0)\n\n layout = QHBoxLayout();\n layout.addLayout(grid);\n layout.addLayout(l2);\n self.acquisition_tab.setLayout(layout)\n\n def changedBrightnessValue(self,value):\n brightness = (value - 0)/(255 - 0)\n self.capture.set(10,brightness)\n\n def changedContrastValue(self,value):\n contrast = (value - 0)/(255 - 0)\n self.capture.set(11,contrast)\n\n def changedSaturationValue(self,value):\n saturation = (value - 0)/(255 - 0)\n self.capture.set(12,saturation)\n\n def setup_camera(self):\n \"\"\"Initialize camera.\n \"\"\"\n self.capture = cv2.VideoCapture(0)\n self.capture.set(3, self.video_size.width())\n self.capture.set(4, self.video_size.height())\n\n self.timer = QTimer()\n self.timer.timeout.connect(self.display_video_stream)\n self.timer.start(30)\n\n def display_video_stream(self):\n \"\"\"Read frame from camera and repaint QLabel widget.\n \"\"\"\n _, frame = self.capture.read()\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n frame = cv2.flip(frame, 1)\n image = QImage(frame, frame.shape[1], 
frame.shape[0],\n frame.strides[0], QImage.Format_RGB888)\n self.image_label.setPixmap(QPixmap.fromImage(image))\n\n def saveVideo(self):\n print(\"algo\")\n\n def stop_preview(self):\n self.timer.stop();\n self.capture.release();\n\n\n\nif __name__ == \"__main__\":\n app = QApplication(sys.argv)\n win = MainApp()\n win.show()\n sys.exit(app.exec_())\n", "repo_name": "malandaj/opencv-video-test", "sub_path": "pyqt-test.py", "file_name": "pyqt-test.py", "file_ext": "py", "file_size_in_byte": 4325, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "83", "api": [{"api_name": "PyQt5.QtWidgets.QTabWidget", "line_number": 7, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QTabWidget.__init__", "line_number": 10, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QTabWidget", "line_number": 10, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QWidget", "line_number": 11, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QWidget", "line_number": 12, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QWidget", "line_number": 13, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.QSize", "line_number": 14, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 24, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QPushButton", "line_number": 27, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QPushButton", "line_number": 30, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QPushButton", "line_number": 33, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QPushButton", "line_number": 36, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 39, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QSlider", "line_number": 40, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.Qt.Horizontal", "line_number": 40, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 40, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.Qt.NoFocus", "line_number": 41, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 41, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 45, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QSlider", "line_number": 46, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.Qt.Horizontal", "line_number": 46, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 46, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.Qt.NoFocus", "line_number": 47, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 47, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 51, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QSlider", "line_number": 52, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.Qt.Horizontal", "line_number": 52, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 52, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.Qt.NoFocus", "line_number": 53, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 53, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QFormLayout", "line_number": 57, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 58, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 59, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 60, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 61, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLabel", 
"line_number": 62, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QHBoxLayout", "line_number": 67, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QGridLayout", "line_number": 73, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QHBoxLayout", "line_number": 78, "usage_type": "call"}, {"api_name": "cv2.VideoCapture", "line_number": 98, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.QTimer", "line_number": 102, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 110, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2RGB", "line_number": 110, "usage_type": "attribute"}, {"api_name": "cv2.flip", "line_number": 111, "usage_type": "call"}, {"api_name": "PyQt5.QtGui.QImage", "line_number": 112, "usage_type": "call"}, {"api_name": "PyQt5.QtGui.QImage.Format_RGB888", "line_number": 113, "usage_type": "attribute"}, {"api_name": "PyQt5.QtGui.QImage", "line_number": 113, "usage_type": "name"}, {"api_name": "PyQt5.QtGui.QPixmap.fromImage", "line_number": 114, "usage_type": "call"}, {"api_name": "PyQt5.QtGui.QPixmap", "line_number": 114, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QApplication", "line_number": 126, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 126, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 129, "usage_type": "call"}]}
+{"seq_id": "42433165025", "text": "from util import *\n\n\n@apply\ndef apply(eq_max, eq_min, Q, K, V): \n ((((i, l), d), S[i - l + 1]), i_limit), β = eq_max.of(Equal[Lamda[Max[Mod[Expr + 1 - Expr]]]])\n S[i], S[0], n = i_limit\n\n (((S[i], u), S[n]), S[i_limit]), ζ = eq_min.of(Equal[Lamda[Min[Add]]])\n \n S[n], d_z = Q.shape\n\n indices = slice(β[i], ζ[i], d)\n\n return Equal(softmax(Q @ K.T / sqrt(d_z) + (BandPart[l - 1, u - 1, d](OneMatrix(n, n)) - 1) * oo) @ V, Lamda[i:n](softmax(Q[i] @ (K[indices]).T / sqrt(d_z)) @ (V[indices])))\n\n\n@prove\ndef prove(Eq):\n from axiom import keras\n\n n, l, u, d_z, d = Symbol(integer=True, positive=True)\n i = Symbol(integer=True)\n Q = Symbol(real=True, shape=(n, d_z))\n K = Symbol(real=True, shape=(n, d_z))\n V = Symbol(real=True, shape=(n, d_z))\n β, ζ = Symbol(shape=(n,), integer=True)\n (Eq.beta, Eq.zeta), Eq.objective = apply(Equal(β, Lamda[i:n](Max(i - l + 1, (i - l + 1) % d))), Equal(ζ, Lamda[i:n](Min(i + u, n))), Q, K, V)\n\n A = Symbol(Eq.objective.find(Mul[MatMul]))\n Eq << keras.eq_max.eq_min.imply.eq.matmul.softmax.band_part_mask.dilated.apply(Eq.beta, Eq.zeta, A, V)\n\n Eq << Eq[-1].subs(A.this.definition)\n\n \n \n\n\nif __name__ == '__main__':\n run()\n# created on 2022-01-01\n# updated on 2022-03-30\n", "repo_name": "cosmosZhou/sympy", "sub_path": "axiom/keras/eq_max/eq_min/imply/eq/matmul/softmax/band_part_mask/dilated/bert.py", "file_name": "bert.py", "file_ext": "py", "file_size_in_byte": 1274, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "83", "api": [{"api_name": "axiom.keras.eq_max.eq_min.imply.eq.matmul.softmax.band_part_mask.dilated.apply", "line_number": 31, "usage_type": "call"}, {"api_name": "axiom.keras.eq_max", "line_number": 31, "usage_type": "attribute"}, {"api_name": "axiom.keras", "line_number": 31, "usage_type": "name"}]}
+{"seq_id": "17764067830", "text": "from django.core.paginator import Paginator\nfrom django.shortcuts import render, get_object_or_404, redirect\nfrom django.http import JsonResponse, HttpResponse, Http404\nimport json\nimport datetime\nfrom django.db.models import Q\nfrom django.contrib import messages\nfrom django.contrib.auth import authenticate, login, logout\nimport requests\nfrom django.urls import reverse_lazy\nfrom django.views.generic import ListView, CreateView\n\nfrom .forms import CreateUserForm, AddApplicationCustomer\nfrom .models import *\nfrom .utils import cookieCart, cartData, guestOrder #AddApplicationUtils\nfrom django.contrib.messages.views import SuccessMessageMixin\n\nmenu = [{'title': \"Головна сторінка\", 'url_name': 'index'},\n {'title': \"Купити насіння\", 'url_name': 'store'},\n {'title': \"Послуга очистки насіння\", 'url_name': 'clean_seed'},\n {'title': \"Про нас\", 'url_name': 'about'},\n {'title': \"Місцезнаходження\", 'url_name': 'we_on_map'},\n {'title': \"Доставка та оплата\", 'url_name': 'delivery'},\n {'title': \"Контакти\", 'url_name': 'contact'}\n]\n\n\nclass AgroHome(ListView):\n model = Product\n template_name = 'seed/index.html'\n context_object_name = 'products'\n #extra_context = {'title': 'Головна сторінка'}\n\n\n def get_context_data(self, *, object_list=None, **kwargs):\n contex = super().get_context_data(**kwargs)\n data = cartData(self.request)\n contex['cartItems'] = data['cartItems']\n contex['menu'] = menu\n return contex\n\n\n def get_queryset(self):\n return Product.objects.filter(is_published = True)\n\n\n\ndef main(request):\n\n context = {}\n return render(request, 'seed/main.html', context)\n\nclass Store(ListView):\n paginate_by = 6\n model = Product\n template_name = 'seed/store.html'\n context_object_name = 'products'\n products = Product.objects.all().select_related('cat')\n categories = Category.objects.all()\n\n def get_queryset(self):\n serch_query = self.request.GET.get('search', '')\n products_2 = Product.objects.filter(Q(name_prod__icontains=serch_query) | Q(content__icontains=serch_query)).select_related('cat')\n return products_2\n\n\n def get_context_data(self, *, object_list=None, **kwargs):\n contex = super().get_context_data(**kwargs)\n data = cartData(self.request)\n contex['cartItems'] = data['cartItems']\n contex['menu'] = menu\n contex['products_2'] = self.get_queryset\n contex['cat_selected'] = 0\n contex['categories'] = self.categories\n contex['products'] = self.products\n\n return contex\n\n\ndef cart(request):\n data = cartData(request)\n cartItems = data['cartItems']\n order = data['order']\n items = data['items']\n\n context = {'items': items,\n 'order': order,\n 'cartItems': cartItems,\n 'menu': menu,\n }\n return render(request, 'seed/cart.html', context)\n\n\ndef checkout(request):\n data = cartData(request)\n cartItems = data['cartItems']\n order = data['order']\n items = data['items']\n\n context = {'items': items,\n 'order': order,\n 'cartItems': cartItems,\n 'menu': menu,\n }\n return render(request, 'seed/checkout.html', context)\n\n\ndef updateItem(request):\n data = json.loads(request.body)\n productId = data['productId']\n action = data['action']\n\n print('action:', action)\n print('productId:', productId)\n\n customer = request.user.customer\n product = Product.objects.get(pk=productId)\n order, created = Order.objects.get_or_create(customer=customer, complete=False)\n orderItem, create = OrderItem.objects.get_or_create(order=order, product=product)\n if action == \"add\":\n 
orderItem.quantity = (orderItem.quantity + 1)\n elif action == \"add2\":\n orderItem.quantity = (orderItem.quantity + 10)\n elif action == \"add3\":\n orderItem.quantity = (orderItem.quantity + 100)\n elif action == \"add4\":\n orderItem.quantity = (orderItem.quantity + 5)\n elif action == \"remove\":\n orderItem.quantity = (orderItem.quantity - 1)\n elif action == \"remove2\":\n orderItem.quantity = (orderItem.quantity - 10)\n elif action == \"remove3\":\n orderItem.quantity = (orderItem.quantity - 100)\n elif action == \"remove4\":\n orderItem.quantity = (orderItem.quantity - 5)\n orderItem.save()\n if orderItem.quantity <= 0:\n orderItem.delete()\n\n return JsonResponse('Item was added', safe=False)\n\ndef multi(a,b):\n if a and b:\n return a * b or print(f'These {a} and {b} aren`t numbers!')\n\n\ndef show_post(request, prod_slug):\n pub = get_object_or_404(Product, slug=prod_slug)\n data = cartData(request)\n cartItems = data['cartItems']\n categories = Category.objects.all()\n products = Product.objects.all().select_related('cat')\n if request.method == 'POST':\n try:\n acre_val = int(request.POST.get('acre_val', False))\n rate_val = int(request.POST.get('rate_val', False))\n res = multi(acre_val, rate_val)\n except:\n return redirect('store')\n else:\n res = 0\n context = {'pub': pub,\n 'products': products,\n 'categories': categories,\n 'menu': menu,\n 'cartItems': cartItems,\n 'cat_selected': 0,\n \"res\": res\n\n }\n return render(request, 'seed/product.html', context)\n\n\ndef show_category(request, cat_slug):\n products_2 = Product.objects.filter(cat__slug_cat=cat_slug).select_related('cat')\n categories = Category.objects.all()\n\n data = cartData(request)\n cartItems = data['cartItems']\n\n if len(products_2) == 0:\n raise Http404()\n\n paginator = Paginator(products_2, 6)\n page_number = request.GET.get('page')\n page_obj = paginator.get_page(page_number)\n products = Product.objects.all().select_related('cat')\n\n\n context = {\n 'products': products,\n 'products_2': products_2,\n 'categories': categories,\n 'menu': menu,\n 'cartItems': cartItems,\n 'cat_selected': cat_slug,\n 'page_obj': page_obj,\n\n }\n\n return render(request, 'seed/store.html', context=context)\n\nclass CleanSeed(SuccessMessageMixin, CreateView):\n form_class = AddApplicationCustomer\n template_name = 'seed/clean_seed.html'\n success_url = reverse_lazy('index')\n success_message = \"Заявка успішно створена, очікуйте на дзвінок від менеджера найближчим часом.\"\n\n\n\n def get_context_data(self, *, object_list=None, **kwargs):\n contex = super().get_context_data(**kwargs)\n data = cartData(self.request)\n contex['cartItems'] = data['cartItems']\n contex['menu'] = menu\n return contex\n\n\n\ndef about(request):\n data = cartData(request)\n cartItems = data['cartItems']\n\n context = {'menu': menu,\n 'cartItems': cartItems\n }\n return render(request, 'seed/about.html', context)\n\n\ndef contact(request):\n data = cartData(request)\n cartItems = data['cartItems']\n\n context = {'menu': menu,\n 'cartItems': cartItems\n }\n\n return render(request, 'seed/contact.html', context)\n\n\ndef delivery(request):\n data = cartData(request)\n cartItems = data['cartItems']\n\n context = {'menu': menu,\n 'cartItems': cartItems\n }\n\n return render(request, 'seed/delivery.html', context)\n\n\ndef we_on_map(request):\n data = cartData(request)\n cartItems = data['cartItems']\n\n context = {'menu': menu,\n 'cartItems': cartItems,\n }\n return render(request, 'seed/we_on_map.html', context)\n\n\ndef 
processOrder(request):\n transaction_id = datetime.datetime.now().timestamp()\n data = json.loads(request.body)\n cookieData = cookieCart(request)\n items = cookieData['items']\n\n if request.user.is_authenticated:\n name = data['form']['name']\n last_name = data['form']['last_name']\n phone = data['form']['phone']\n email = data['form']['email']\n customer = Customer.objects.get(user=request.user)\n customer.name = name\n customer.last_name = last_name\n customer.phone = phone\n customer.email = email\n customer.save()\n #order = Order.objects.filter(customer=customer, complete=False).first()\n order, created = Order.objects.get_or_create(customer=customer, complete=False)\n order.date_orderd = datetime.datetime.now()\n order.save()\n for item in items:\n product = Product.objects.get(pk=item['product']['pk'])\n\n orderItem = OrderItem.objects.create(\n product=product,\n order=order,\n quantity=item['quantity']\n )\n\n\n else:\n customer, order = guestOrder(request, data)\n\n\n total = float(data['form']['total'])\n order.transaction_id = transaction_id\n\n if total == float(order.get_cart_total):\n order.complete = True\n order.save()\n\n if order.shipping == True:\n ShippingAddress.objects.create(\n customer=customer,\n order=order,\n region=data['shipping']['region'],\n city=data['shipping']['city'],\n mail=data['shipping']['mail'],\n mail_number=data['shipping']['mail_number'],\n zipcode=data['shipping']['zipcode'],\n pay=data['shipping']['pay'],\n comment=data['shipping']['comment'],\n )\n return JsonResponse('Payment complete!', safe=False)\n\ndef register(request):\n form = CreateUserForm()\n\n if request.method == 'POST':\n form = CreateUserForm(request.POST)\n\n if form.is_valid():\n user = form.save()\n\n Customer.objects.create(user=user)\n\n user = form.cleaned_data.get('username')\n messages.success(request, f'Аккаун для {user} створений. 
Увійдіть в аккаунт.')\n return redirect('login')\n\n\n context = {'form': form,\n 'menu': menu,\n 'cartItems': 0,\n }\n return render(request, 'seed/register.html', context)\n\n\ndef login_user(request):\n if request.method == 'POST':\n try:\n email = request.POST.get('email')\n password = request.POST.get('password')\n username = User.objects.get(email=email.lower()).username\n\n user = authenticate(request, username=username, password=password)\n # user_p = authenticate(request, password=password)\n if user is not None:\n login(request, user)\n return redirect('index')\n else:\n messages.info(request, \"Некоректний емейл або пароль.\")\n return redirect('login')\n except:\n messages.info(request, \"Некоректний емейл або пароль.\")\n return redirect('login')\n\n\n context = {'menu': menu,\n 'cartItems': 0,}\n return render(request, 'seed/login.html', context)\n\ndef logoutUser(request):\n logout(request)\n return redirect('login')\n\n\n\n\n\n", "repo_name": "Astrogor4ik/Agroviktoria", "sub_path": "agroviktoria/seed/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 11385, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "73", "api": [{"api_name": "django.views.generic.ListView", "line_number": 28, "usage_type": "name"}, {"api_name": "utils.cartData", "line_number": 37, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 51, "usage_type": "call"}, {"api_name": "django.views.generic.ListView", "line_number": 53, "usage_type": "name"}, {"api_name": "django.db.models.Q", "line_number": 63, "usage_type": "call"}, {"api_name": "utils.cartData", "line_number": 69, "usage_type": "call"}, {"api_name": "utils.cartData", "line_number": 81, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 91, "usage_type": "call"}, {"api_name": "utils.cartData", "line_number": 95, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 105, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 109, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 140, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 148, "usage_type": "call"}, {"api_name": "utils.cartData", "line_number": 149, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 159, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 171, "usage_type": "call"}, {"api_name": "utils.cartData", "line_number": 178, "usage_type": "call"}, {"api_name": "django.http.Http404", "line_number": 182, "usage_type": "call"}, {"api_name": "django.core.paginator.Paginator", "line_number": 184, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 201, "usage_type": "call"}, {"api_name": "django.contrib.messages.views.SuccessMessageMixin", "line_number": 203, "usage_type": "name"}, {"api_name": "django.views.generic.CreateView", "line_number": 203, "usage_type": "name"}, {"api_name": "forms.AddApplicationCustomer", "line_number": 204, "usage_type": "name"}, {"api_name": "django.urls.reverse_lazy", "line_number": 206, "usage_type": "call"}, {"api_name": "utils.cartData", "line_number": 213, "usage_type": "call"}, {"api_name": "utils.cartData", "line_number": 221, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 227, "usage_type": "call"}, {"api_name": "utils.cartData", "line_number": 231, "usage_type": "call"}, {"api_name": 
"django.shortcuts.render", "line_number": 238, "usage_type": "call"}, {"api_name": "utils.cartData", "line_number": 242, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 249, "usage_type": "call"}, {"api_name": "utils.cartData", "line_number": 253, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 259, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 263, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 263, "usage_type": "attribute"}, {"api_name": "json.loads", "line_number": 264, "usage_type": "call"}, {"api_name": "utils.cookieCart", "line_number": 265, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 281, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 281, "usage_type": "attribute"}, {"api_name": "utils.guestOrder", "line_number": 294, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 316, "usage_type": "call"}, {"api_name": "forms.CreateUserForm", "line_number": 319, "usage_type": "call"}, {"api_name": "forms.CreateUserForm", "line_number": 322, "usage_type": "call"}, {"api_name": "django.contrib.messages.success", "line_number": 330, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 330, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 331, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 338, "usage_type": "call"}, {"api_name": "django.contrib.auth.authenticate", "line_number": 348, "usage_type": "call"}, {"api_name": "django.contrib.auth.login", "line_number": 351, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 352, "usage_type": "call"}, {"api_name": "django.contrib.messages.info", "line_number": 354, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 354, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 355, "usage_type": "call"}, {"api_name": "django.contrib.messages.info", "line_number": 357, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 357, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 358, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 363, "usage_type": "call"}, {"api_name": "django.contrib.auth.logout", "line_number": 366, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 367, "usage_type": "call"}]}
+{"seq_id": "22930619289", "text": "__author__ = \"Matt Davis\"\n__email__ = \"matthew1.davis@intel.com\"\n__description__ = \"This script loads from the CWB Odata feed to the GSMDW database on sql1717-fm1-in.amr.corp.intel.com,3181\"\n__schedule__ = \"Daily (excluding Sunday because the Weekly Loader runs then) at 3:30 AM PST\"\n\nimport os\nimport sys; sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir)) # add current file's parent directory to path\nimport pandas as pd\nfrom datetime import datetime, timedelta\nfrom time import time\nfrom Helper_Functions import queryAPIPortal, uploadDFtoSQL, executeStoredProcedure, getLastRefresh\nfrom Logging import log\nfrom CQN_ILM_API_Weekly import prepMQI\n\n# remove the current file's parent directory from sys.path since it was only needed for imports above\ntry:\n sys.path.remove(os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir))\nexcept ValueError: # Already removed\n pass\n\n\nif __name__ == \"__main__\":\n start_time = time()\n\n ### BEGIN ILM MQI section ###\n # initialize variables\n project_name = 'ILM MQI API Daily Script'\n data_area = 'ILM MQI'\n\n last_load = getLastRefresh(project_name=project_name, data_area=data_area)\n if last_load is None:\n temp = datetime.now() - timedelta(hours=8)\n else:\n temp = datetime.strftime(last_load, '%Y-%m-%dT%H:%M:%S')\n last_load = pd.Timestamp(temp).replace(minute=00, second=00)\n\n row_count = queryAPIPortal(url=\"https://apis-internal.intel.com/ilm/mqi/v1/material-issues?$select=EventId&$filter=\\\"ModifiedDate\\\">='{}'&$format=JSON\".format(last_load)).shape[0]\n\n # Get data from API\n ### IMPORTANT - the same API call will not return the rows in the same order by default, ORDERBY must be used to appropriately get all rows\n df_mat_issue = queryAPIPortal(url=\"https://apis-internal.intel.com/ilm/mqi/v1/material-issues?$filter=\\\"ModifiedDate\\\">='{}'&$orderby=EventId&$format=JSON\".format(last_load))\n print('Loaded {} records from the API into DataFrame'.format(df_mat_issue.shape[0]))\n\n # Transform data\n df = prepMQI(df_mat_issue)\n print('Data prep completed!')\n\n # Load data into SQL Server database\n insert_succeeded, error_msg = uploadDFtoSQL(table=\"stage.stg_API_ILM_MQI\", data=df, chunk_size=500, truncate=True)\n log(insert_succeeded, project_name=project_name, data_area=data_area, row_count=df.shape[0], error_msg=error_msg)\n\n # Execute stored procedure for mqi.speedStg table\n sp_succeeded, error_msg = executeStoredProcedure('mqi.sp_API_ILM_MQI_Merge')\n log(sp_succeeded, project_name=project_name, package_name=\"SQL: mqi.sp_API_ILM_MQI_Merge\", data_area=data_area, error_msg=error_msg)\n ### END ILM MQI section ###\n\n ### BEGIN Root cause table\n project_name = 'ILM MQI API Daily -Root Cause'\n data_area = 'RootCauseDetails' \n row_count = queryAPIPortal(url=\"https://apis-internal.intel.com/ilm/mqi/v1/material-root-causes?$select=EventId&$format=JSON\").shape[0]\n df_root = pd.DataFrame()\n\n # Get data from API\n for i in range(0, row_count, 2000):\n # print(i)\n temp = queryAPIPortal(url=\"https://apis-internal.intel.com/ilm/mqi/v1/material-root-causes?$start_index={}&$count=1000&$orderby=EventId&$format=JSON\".format(i))\n if i == 0:\n df_root = temp\n else:\n df_root = pd.concat([df_root, temp], ignore_index=True)\n print('Loaded {} records from the API into DataFrame'.format(df_root.shape[0]))\n\n # Transform data in DataFrame\n df_root = df_root[['EventId', 'Category', 'Details', 'KeyFailure', 'Systemic', 
'PrimaryIndicator', 'ModifiedBy', 'ModifiedDate']]\n df_root['ModifiedDate'] = df_root['ModifiedDate'].apply(lambda x: x if isinstance(x, datetime) else datetime.strptime(x.split(\".\")[0], '%Y-%m-%dT%H:%M:%S') if isinstance(x, str) else None)\n\n # Load data into SQL Server database\n insert_succeeded, error_msg = uploadDFtoSQL(table=\"stage.stg_API_ILM_MQI_RootCauseDetails\", data=df_root, truncate=True)\n log(insert_succeeded, project_name=project_name, data_area=data_area, row_count=df_root.shape[0], error_msg=error_msg) # row_count is automatically set to 0 if error\n\n # Execute Stored Procedure\n sp_succeeded, error_msg = executeStoredProcedure('mqi.sp_API_ILM_MQI_RootCauseDetails')\n log(sp_succeeded, project_name=project_name, package_name=\"SQL: mqi.sp_API_ILM_MQI_RootCauseDetails\", data_area=data_area, error_msg=error_msg)\n\n print(\"--- %s seconds ---\" % (time() - start_time))\n", "repo_name": "abhishekagnihotri-dataanalytics/python-intel-work", "sub_path": "CQN_ILM_API_Daily.py", "file_name": "CQN_ILM_API_Daily.py", "file_ext": "py", "file_size_in_byte": 4493, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "83", "api": [{"api_name": "sys.path.append", "line_number": 7, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 7, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 7, "usage_type": "call"}, {"api_name": "os.path", "line_number": 7, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 7, "usage_type": "call"}, {"api_name": "os.path.realpath", "line_number": 7, "usage_type": "call"}, {"api_name": "os.pardir", "line_number": 7, "usage_type": "attribute"}, {"api_name": "sys.path.remove", "line_number": 17, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 17, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 17, "usage_type": "call"}, {"api_name": "os.path", "line_number": 17, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 17, "usage_type": "call"}, {"api_name": "os.path.realpath", "line_number": 17, "usage_type": "call"}, {"api_name": "os.pardir", "line_number": 17, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 23, "usage_type": "call"}, {"api_name": "Helper_Functions.getLastRefresh", "line_number": 30, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 32, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 32, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 32, "usage_type": "call"}, {"api_name": "datetime.datetime.strftime", "line_number": 34, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 34, "usage_type": "name"}, {"api_name": "pandas.Timestamp", "line_number": 35, "usage_type": "call"}, {"api_name": "Helper_Functions.queryAPIPortal", "line_number": 37, "usage_type": "call"}, {"api_name": "Helper_Functions.queryAPIPortal", "line_number": 41, "usage_type": "call"}, {"api_name": "CQN_ILM_API_Weekly.prepMQI", "line_number": 45, "usage_type": "call"}, {"api_name": "Helper_Functions.uploadDFtoSQL", "line_number": 49, "usage_type": "call"}, {"api_name": "Logging.log", "line_number": 50, "usage_type": "call"}, {"api_name": "Helper_Functions.executeStoredProcedure", "line_number": 53, "usage_type": "call"}, {"api_name": "Logging.log", "line_number": 54, "usage_type": "call"}, {"api_name": "Helper_Functions.queryAPIPortal", "line_number": 60, "usage_type": "call"}, {"api_name": 
"pandas.DataFrame", "line_number": 61, "usage_type": "call"}, {"api_name": "Helper_Functions.queryAPIPortal", "line_number": 66, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 70, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 75, "usage_type": "argument"}, {"api_name": "datetime.datetime.strptime", "line_number": 75, "usage_type": "call"}, {"api_name": "Helper_Functions.uploadDFtoSQL", "line_number": 78, "usage_type": "call"}, {"api_name": "Logging.log", "line_number": 79, "usage_type": "call"}, {"api_name": "Helper_Functions.executeStoredProcedure", "line_number": 82, "usage_type": "call"}, {"api_name": "Logging.log", "line_number": 83, "usage_type": "call"}, {"api_name": "time.time", "line_number": 85, "usage_type": "call"}]}
+{"seq_id": "11335256006", "text": "import csv\nimport psycopg2\n\nconnection = psycopg2.connect(\"host=localhost user=postgres dbname=homes\")\ncur = connection.cursor()\n\nrecord = {}\n\n# with open(\"homes.csv\", mode=\"r\") as csv_file:\n# csv_reader = csv.DictReader(csv_file)\n# for row in csv_reader:\n# if record.get(row[' \"List\"'] or row[' \"Living\"'].replace(\" \", \"\")) == None:\n# record[row[' \"List\"'] or row[' \"Living\"'].replace(\" \", \"\")] = 0\n# record[row[' \"List\"'] or row[' \"Living\"'].replace(\" \",\"\")] +=1\n\n# with open(\"homes2.csv\", mode=\"w\") as csv_file:\n# fieldnames = [\"Sell\",\"List\",\"Living\",\"Rooms\",\"Beds\",\"Baths\",\"Age\",\"Acres\",\"Taxes\"]\n# csv_writer = csv.DictWriter(csv_file, fieldnames)\n\n# csv_writer.writeheader()\n# for Sell,List,Living,Rooms,Beds,Baths,Age,Acres,Taxes in record.items():\n# csv_writer.writerow({\"Sell\": Sell,\"List\": List,\"Living\":Living,\"Rooms\": Rooms,\"Beds\": Beds,\"Baths\": Baths,\"Age\": Age,\"Acres\": Acres,\"Taxes\": Taxes})\n\nwith open(\"homes.csv\", mode='r') as csv_file:\n csv_reader = csv.DictReader(csv_file)\n recordCount = 0\n # next(csv_reader)\n # cur.copy_from(csv_file,'home',sep=',')\n for row in csv_reader:\n sqlInsert = \\\n \"\"\"INSERT INTO home (\"Sell\", \"List\", \"Living\", \"Rooms\", \"Beds\", \"Baths\", \"Age\", \"Acres\", \"Taxes\") VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s);\"\"\"\n try:\n cur.execute(sqlInsert,(row['Sell'], \n row[' \"List\"'], \n row[' \"Living\"'],\n row[' \"Rooms\"'], \n row[' \"Beds\"'], \n row[' \"Baths\"'], \n row[' \"Age\"'], \n row[' \"Acres\"'], \n row[' \"Taxes\"']))\n connection.commit()\n recordCount +=1\n \n except psycopg2.DatabaseError as error:\n print(error)\n quit()\n connection.close()", "repo_name": "JinxCY93/API-Script-Python", "sub_path": "script.py", "file_name": "script.py", "file_ext": "py", "file_size_in_byte": 1984, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "83", "api": [{"api_name": "psycopg2.connect", "line_number": 4, "usage_type": "call"}, {"api_name": "csv.DictReader", "line_number": 25, "usage_type": "call"}, {"api_name": "psycopg2.DatabaseError", "line_number": 45, "usage_type": "attribute"}]}
+{"seq_id": "22671462164", "text": "from django.shortcuts import render \r\nfrom dateutil.relativedelta import relativedelta\r\nfrom datetime import date\r\n\r\nfrom setup.models import Charge \r\nfrom mla.models import TransactionAssessment \r\nfrom random import randint\r\nfrom django.contrib import messages\r\nfrom django.http import HttpResponseRedirect\r\n\r\ntoday = date.today()\r\nexpirationdate_1year = today + relativedelta(years=1)\r\n\r\nrange_start = 10**(8-1)\r\nrange_end = (10**8)-1\r\ngenerated_rand_num = randint(range_start, range_end)\r\ntransaction_code = \"ML\"+str(generated_rand_num)\r\n\r\ndef renew_ps(assessment_type, tin_obj, chassis_number, staff_obj):\r\n\t \r\n\t\t# GENERATE BILL\r\n\ttry:\r\n\t\tprint(assessment_type)\r\n\t\t\t\t\t \r\n\r\n\t\t#Get particulars [Vehicle License [1.6 - 2.0]]\r\n\t\tif assessment_type == \"Vehicle License [1.6 - 2.0]\":\r\n\r\n\t\t\tcharge_public_vehicle = Charge.objects.get(options = 'Renew', vehicle_type='Private Vehicle Saloon', particulars = 'Vehicle License [1.6 - 2.0]')\r\n\t\t\tparticulars = \"Vehicle License [1.6 - 2.0]\";\r\n\t\t\tamount = charge_public_vehicle.amount\r\n\r\n\t\t\tTransactionAssessment.objects.create( transaction_code = transaction_code, tin = tin_obj, chassis_number = chassis_number, particulars = particulars, amount = amount, expiration_date = expirationdate_1year, transaction_type = 'Renewal of Particulars', staff =staff_obj) \r\n\t\t\t\r\n\t\t\t\r\n\t\t\t \r\n\t\t#Get particulars [Vehicle License [2.1 - 3.0]]\r\n\t\telif assessment_type == \"Vehicle License [2.1 - 3.0]\":\r\n\r\n\t\t\t\t\t#Check if Particulars have expired:\r\n\t\t\tcharge_vehicle_license_btn2_3 = Charge.objects.get(options = 'Renew', vehicle_type='Private Vehicle Saloon', particulars='Vehicle License [2.1 - 3.0]')\r\n\t\t\t\r\n\t\t\tparticulars = \"Vehicle License [2.1 - 3.0]\";\r\n\t\t\tamount = charge_vehicle_license_btn2_3.amount\r\n\t\t\t\r\n\t\t\t\r\n\t\t\tTransactionAssessment.objects.create( transaction_code = transaction_code, tin = tin_obj, chassis_number = chassis_number, particulars = particulars, amount = amount, expiration_date = expirationdate_1year, transaction_type = 'Renewal of Particulars', staff =staff_obj) \t\t \r\n\t\t\t\r\n\t\t\t\r\n\t\t\t\r\n\r\n\t\t#Get particulars [Vehicle License [3.1 - Above]]\t\t\t\t \r\n\t\telif assessment_type == \"Vehicle License [3.1 - Above]\":\r\n\t\t\t\r\n\t\t\t\t\t#Check if Particulars have expired:\r\n\t\t\r\n\t\t\tcharge_vehicle_license_above3 = Charge.objects.get(options = 'Renew', vehicle_type='Private Vehicle Saloon', particulars='Vehicle License [3.1 - Above]')\r\n\t\t\t\t\r\n\t\t\tparticulars = \"Vehicle License [3.1 - Above]\";\r\n\t\t\tamount = charge_vehicle_license_above3.amount;\r\n\t\t\t\r\n\t\t\t\r\n\t\t\tTransactionAssessment.objects.create( transaction_code = transaction_code, tin = tin_obj, chassis_number = chassis_number, particulars = particulars, amount = amount, expiration_date = expirationdate_1year, transaction_type = 'Renewal of Particulars', staff =staff_obj) \t\t \r\n\t\t\t\r\n\t\t\t\r\n\t\t\t#Get particulars Certificate of road worthiness\r\n\r\n\t\telif assessment_type == \"Certificate of road worthiness\":\r\n\r\n\t\t\tcharge_certificate_of_road_worthiness = Charge.objects.get(options = 'Renew', vehicle_type='Private Vehicle Saloon', particulars='Certificate of road worthiness')\r\n\r\n\t\t\tparticulars = \"Certificate of road worthiness\";\r\n\t\t\tamount = 
charge_certificate_of_road_worthiness.amount\r\n\t\t\t\r\n\t\t\t\r\n\t\t\tTransactionAssessment.objects.create( transaction_code = transaction_code, tin = tin_obj, chassis_number = chassis_number, particulars = particulars, amount = amount, expiration_date = expirationdate_1year, transaction_type = 'Renewal of Particulars', staff =staff_obj) \t\t\r\n\t\t\t\r\n\t\t\t \r\n\t\t#Get particulars Proof of ownership\t\t\t \r\n\t\telif assessment_type == \"Proof of ownership\":\r\n\t\t\t\t \r\n\t\t\t\t \r\n\t\t\tcharge_proof_of_ownership = Charge.objects.get(options = 'Renew', vehicle_type='Private Vehicle Saloon', particulars='Proof of ownership')\r\n\t\t\t\t\r\n\t\t\tparticulars = \"Proof of ownership\";\r\n\t\t\tamount = charge_proof_of_ownership.amount \r\n\r\n\t\t\tTransactionAssessment.objects.create( transaction_code = transaction_code, tin = tin_obj, chassis_number = chassis_number, particulars = particulars, amount = amount, expiration_date = expirationdate_1year, transaction_type = 'Renewal of Particulars', staff =staff_obj) \t\t \r\n\t\t\t\r\n\t\t\t\r\n\r\n\t\t#Get particulars New Plate Number\t\t\t\t \r\n\t\telif assessment_type == \"New Plate Number\":\r\n\t\t\tcharge_new_plate_number = Charge.objects.get(options = 'Renew', vehicle_type='Private Vehicle Saloon', particulars='New Plate Number')\r\n\t\t\t\r\n\t\t\tparticulars = \"New Plate Number\";\r\n\t\t\tamount = charge_new_plate_number.amount\r\n\t\t\t\r\n\t\t\t\r\n\t\t\tTransactionAssessment.objects.create( transaction_code = transaction_code, tin = tin_obj, chassis_number = chassis_number, particulars = particulars, amount = amount, transaction_type = 'Renewal of Particulars', staff =staff_obj) \r\n\t\t\t\r\n\t\t\t\r\n\t\t\r\n\t\t#Get particulars Registration Book\t\t\t\t \r\n\t\telif assessment_type == \"Registration Book\":\r\n\t\t\tcharge_registration_book = Charge.objects.get(options = 'Renew', vehicle_type='Private Vehicle Saloon', particulars='Registration Book')\r\n\t\t\t\t\r\n\t\t\tparticulars = \"Registration Book\";\r\n\t\t\tamount = charge_registration_book.amount\r\n\t\t\t\r\n\t\t\t\r\n\t\t\tTransactionAssessment.objects.create( transaction_code = transaction_code, tin = tin_obj, chassis_number = chassis_number, particulars = particulars, amount = amount, transaction_type = 'Renewal of Particulars', staff =staff_obj) \r\n\t\t\t\r\n\t\t\t\r\n\t\t\t\r\n\t\t#Get particulars SMS Alert\r\n\t\telif assessment_type == \"SMS Alert\":\r\n\t\t\tcharge_sms_alert = Charge.objects.get(options = 'Renew', vehicle_type='Private Vehicle Saloon', particulars='SMS Alert')\r\n\t\t\t\t\r\n\t\t\tparticulars = \"SMS Alert\";\r\n\t\t\tamount = charge_sms_alert.amount\r\n\t\t\t\r\n\t\t\t\r\n\t\t\tTransactionAssessment.objects.create( transaction_code = transaction_code, tin = tin_obj, chassis_number = chassis_number, particulars = particulars, amount = amount, transaction_type = 'Renewal of Particulars', staff =staff_obj)\r\n\t\t\t\r\n\t\t\t \r\n\r\n\t\t#Get particulars Stamp Duty\r\n\t\telif assessment_type == \"Stamp Duty\":\r\n\t\t\tcharge_stamp_duty = Charge.objects.get(options = 'Renew', vehicle_type='Private Vehicle Saloon', particulars='Stamp Duty')\r\n\t\t\t\t\r\n\t\t\tparticulars = \"Stamp Duty\";\r\n\t\t\tamount = charge_stamp_duty.amount\r\n\t\t\t\r\n\t\t\t\r\n\t\t\tTransactionAssessment.objects.create( transaction_code = transaction_code, tin = tin_obj, chassis_number = chassis_number, particulars = particulars, amount = amount, transaction_type = 'Renewal of Particulars', staff =staff_obj) \r\n\r\n\t\t\t\r\n\r\n\t\treturn 
transaction_code\r\n\r\n\texcept Charge.DoesNotExist as e:\r\n\t\t \r\n\t\treturn \"charge_unavailable\"\r\n\r\n\t\t\r\n\texcept Exception as e:\r\n\t\tprint(e)\r\n\t\treturn \"uncaught_exception\"", "repo_name": "mawejjehakim/ci", "sub_path": "mla/bills/renew_bills/renew_ps.py", "file_name": "renew_ps.py", "file_ext": "py", "file_size_in_byte": 6509, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "83", "api": [{"api_name": "datetime.date.today", "line_number": 11, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 11, "usage_type": "name"}, {"api_name": "dateutil.relativedelta.relativedelta", "line_number": 12, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 16, "usage_type": "call"}, {"api_name": "setup.models.Charge.objects.get", "line_number": 29, "usage_type": "call"}, {"api_name": "setup.models.Charge.objects", "line_number": 29, "usage_type": "attribute"}, {"api_name": "setup.models.Charge", "line_number": 29, "usage_type": "name"}, {"api_name": "mla.models.TransactionAssessment.objects.create", "line_number": 33, "usage_type": "call"}, {"api_name": "mla.models.TransactionAssessment.objects", "line_number": 33, "usage_type": "attribute"}, {"api_name": "mla.models.TransactionAssessment", "line_number": 33, "usage_type": "name"}, {"api_name": "setup.models.Charge.objects.get", "line_number": 41, "usage_type": "call"}, {"api_name": "setup.models.Charge.objects", "line_number": 41, "usage_type": "attribute"}, {"api_name": "setup.models.Charge", "line_number": 41, "usage_type": "name"}, {"api_name": "mla.models.TransactionAssessment.objects.create", "line_number": 47, "usage_type": "call"}, {"api_name": "mla.models.TransactionAssessment.objects", "line_number": 47, "usage_type": "attribute"}, {"api_name": "mla.models.TransactionAssessment", "line_number": 47, "usage_type": "name"}, {"api_name": "setup.models.Charge.objects.get", "line_number": 57, "usage_type": "call"}, {"api_name": "setup.models.Charge.objects", "line_number": 57, "usage_type": "attribute"}, {"api_name": "setup.models.Charge", "line_number": 57, "usage_type": "name"}, {"api_name": "mla.models.TransactionAssessment.objects.create", "line_number": 63, "usage_type": "call"}, {"api_name": "mla.models.TransactionAssessment.objects", "line_number": 63, "usage_type": "attribute"}, {"api_name": "mla.models.TransactionAssessment", "line_number": 63, "usage_type": "name"}, {"api_name": "setup.models.Charge.objects.get", "line_number": 70, "usage_type": "call"}, {"api_name": "setup.models.Charge.objects", "line_number": 70, "usage_type": "attribute"}, {"api_name": "setup.models.Charge", "line_number": 70, "usage_type": "name"}, {"api_name": "mla.models.TransactionAssessment.objects.create", "line_number": 76, "usage_type": "call"}, {"api_name": "mla.models.TransactionAssessment.objects", "line_number": 76, "usage_type": "attribute"}, {"api_name": "mla.models.TransactionAssessment", "line_number": 76, "usage_type": "name"}, {"api_name": "setup.models.Charge.objects.get", "line_number": 83, "usage_type": "call"}, {"api_name": "setup.models.Charge.objects", "line_number": 83, "usage_type": "attribute"}, {"api_name": "setup.models.Charge", "line_number": 83, "usage_type": "name"}, {"api_name": "mla.models.TransactionAssessment.objects.create", "line_number": 88, "usage_type": "call"}, {"api_name": "mla.models.TransactionAssessment.objects", "line_number": 88, "usage_type": "attribute"}, {"api_name": "mla.models.TransactionAssessment", 
"line_number": 88, "usage_type": "name"}, {"api_name": "setup.models.Charge.objects.get", "line_number": 94, "usage_type": "call"}, {"api_name": "setup.models.Charge.objects", "line_number": 94, "usage_type": "attribute"}, {"api_name": "setup.models.Charge", "line_number": 94, "usage_type": "name"}, {"api_name": "mla.models.TransactionAssessment.objects.create", "line_number": 100, "usage_type": "call"}, {"api_name": "mla.models.TransactionAssessment.objects", "line_number": 100, "usage_type": "attribute"}, {"api_name": "mla.models.TransactionAssessment", "line_number": 100, "usage_type": "name"}, {"api_name": "setup.models.Charge.objects.get", "line_number": 106, "usage_type": "call"}, {"api_name": "setup.models.Charge.objects", "line_number": 106, "usage_type": "attribute"}, {"api_name": "setup.models.Charge", "line_number": 106, "usage_type": "name"}, {"api_name": "mla.models.TransactionAssessment.objects.create", "line_number": 112, "usage_type": "call"}, {"api_name": "mla.models.TransactionAssessment.objects", "line_number": 112, "usage_type": "attribute"}, {"api_name": "mla.models.TransactionAssessment", "line_number": 112, "usage_type": "name"}, {"api_name": "setup.models.Charge.objects.get", "line_number": 118, "usage_type": "call"}, {"api_name": "setup.models.Charge.objects", "line_number": 118, "usage_type": "attribute"}, {"api_name": "setup.models.Charge", "line_number": 118, "usage_type": "name"}, {"api_name": "mla.models.TransactionAssessment.objects.create", "line_number": 124, "usage_type": "call"}, {"api_name": "mla.models.TransactionAssessment.objects", "line_number": 124, "usage_type": "attribute"}, {"api_name": "mla.models.TransactionAssessment", "line_number": 124, "usage_type": "name"}, {"api_name": "setup.models.Charge.objects.get", "line_number": 130, "usage_type": "call"}, {"api_name": "setup.models.Charge.objects", "line_number": 130, "usage_type": "attribute"}, {"api_name": "setup.models.Charge", "line_number": 130, "usage_type": "name"}, {"api_name": "mla.models.TransactionAssessment.objects.create", "line_number": 136, "usage_type": "call"}, {"api_name": "mla.models.TransactionAssessment.objects", "line_number": 136, "usage_type": "attribute"}, {"api_name": "mla.models.TransactionAssessment", "line_number": 136, "usage_type": "name"}, {"api_name": "setup.models.Charge.DoesNotExist", "line_number": 142, "usage_type": "attribute"}, {"api_name": "setup.models.Charge", "line_number": 142, "usage_type": "name"}]}
+{"seq_id": "74684691470", "text": "from Crypto.Util.number import long_to_bytes, inverse\nfrom gmpy2 import isqrt, square, is_square\n\nn = REDACTED\ne = REDACTED\nc = REDACTED\n\ndef fermat_factors(n):\n assert n % 2 != 0\n a = isqrt(n)\n b2 = square(a) - n\n while not is_square(b2):\n a += 1\n b2 = square(a) - n\n factor1 = a + isqrt(b2)\n factor2 = a - isqrt(b2)\n return int(factor1), int(factor2)\n\np, q = fermat_factors(n)\nd = inverse(e, (p - 1) * (q - 1))\nm = pow(c, d, n)\n\nprint(long_to_bytes(m))\n", "repo_name": "ByamB4/Common-CTF-Challenges", "sub_path": "cryptography/asymmetric-cipher/src/fermats_factor_attack.py", "file_name": "fermats_factor_attack.py", "file_ext": "py", "file_size_in_byte": 490, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 33, "dataset": "github-code", "pt": "83", "api": [{"api_name": "gmpy2.isqrt", "line_number": 10, "usage_type": "call"}, {"api_name": "gmpy2.square", "line_number": 11, "usage_type": "call"}, {"api_name": "gmpy2.is_square", "line_number": 12, "usage_type": "call"}, {"api_name": "gmpy2.square", "line_number": 14, "usage_type": "call"}, {"api_name": "gmpy2.isqrt", "line_number": 15, "usage_type": "call"}, {"api_name": "gmpy2.isqrt", "line_number": 16, "usage_type": "call"}, {"api_name": "Crypto.Util.number.inverse", "line_number": 20, "usage_type": "call"}, {"api_name": "Crypto.Util.number.long_to_bytes", "line_number": 23, "usage_type": "call"}]}
+{"seq_id": "18022851479", "text": "from django.shortcuts import render, get_object_or_404\nfrom . import models\n\n\ndef categories(request):\n categories = models.categories.objects.all()\n return render(request, 'Molavi/categories.html', {'categories': categories})\n\n\ndef all_details(request, question_id):\n categories_raftare_manfi = get_object_or_404(models.categories, pk=question_id)\n categories_raftare_mosbat = get_object_or_404(models.categories,pk=question_id)\n categories_jameh = get_object_or_404(models.categories,pk=question_id)\n return render(request, 'Molavi/raftare_manfi.html', {'categories_raftare_manfi': categories_raftare_manfi,\n 'categories_raftare_mosbat':categories_raftare_mosbat,\n 'categories_jameh' : categories_jameh,\n })\n\n\ndef results(request,question_id):\n test = models.raftar_manfi.objects.filter(title='مقدمه ای بر رفتار منفی',pk=question_id)\n test2 = models.raftar_manfi.objects.filter(title='آزمندی',pk=question_id)\n test3 = models.raftar_manfi.objects.filter(title='بخل', pk=question_id)\n return render(request, 'Molavi/results.html', {'test':test,'test2':test2,'test3':test3})\n", "repo_name": "rezaPython/Molavi", "sub_path": "Molavi/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 1295, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "83", "api": [{"api_name": "django.shortcuts.render", "line_number": 7, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 11, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 12, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 13, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 14, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 24, "usage_type": "call"}]}
+{"seq_id": "38782544501", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import print_function\n\nimport sys\nfrom datetime import datetime\nimport json\n\nfrom .response import Response\n\n\n# Level 1\nclass Project(Response):\n def __init__(self, token=None, id=None, title=None, **kwargs):\n Response.__init__(self, token, **kwargs)\n self.title = title\n self.id = id\n # self.description = description\n # self.milestones = milestones\n self.endpoint = '/api/v1/projects.json'\n self.specific_endpoint = '/api/v1/projects/{id}.json'\n\n def register(self):\n response = self._add(endpoint=self.endpoint, item=self.to_dict())\n return self.__class__(token=self.token, **response)\n\n def get(self):\n response = self._get_or_update(endpoint=self.specific_endpoint, id=self.id, method='GET')\n return self.__class__(token=self.token, **response)\n\n def list(self, name=None, page_num=None):\n response = self.find(endpoint=self.endpoint, name=name, page_num=page_num)\n if isinstance(response, list):\n return [self.__class__(token=self.token, **item) for item in response]\n else:\n return []\n\n def update(self):\n response = self._get_or_update(endpoint=self.specific_endpoint, id=self.id, method='PUT', item=self.to_dict())\n return self.__class__(token=self.token, **response)\n\n\n# Level 2\nclass Folder(Project):\n def __init__(self, token=None, id=None, title=None, project_id=None, **kwargs):\n Project.__init__(self, token, id, title, **kwargs)\n self.project_id = project_id\n self.endpoint = '/api/v1/milestones.json'\n self.specific_endpoint = '/api/v1/milestones/{id}.json'\n\n def __get_folders(self, period=None):\n response = self.find(endpoint=self.endpoint, project_id=self.id, period=period)\n if isinstance(response, list) and len(response) > 0:\n return [Folder(project_id=self.id, token=self.token, **item) for item in response]\n else:\n return []\n\n def get_current_folders(self):\n return self.__get_folders(period='current_milestones')\n\n def get_future_folders(self):\n return self.__get_folders(period='future_milestones')\n\n def get_past_folders(self):\n return self.__get_folders(period='last_milestones')\n\n\n# Level 2.1\nclass Experiment(Project):\n def __init__(self, token=None, project_id=None, milestone_id=None, id=None, title=None, **kwargs):\n Project.__init__(self, token, id, title, **kwargs)\n self.project_id = project_id\n self.milestone_id = milestone_id\n self.endpoint = '/api/v1/experiments.json'\n self.specific_endpoint = '/api/v1/experiments/{id}.json'\n\n\n# Level 2.2\nclass Procedure(Project):\n def __init__(self, token=None, container_id=None, id=None, name=None, section_type=None, container_type=None,\n **kwargs):\n Project.__init__(self, token, id, **kwargs)\n self.container_id = container_id\n self.name = name\n self.section_type = section_type\n self.container_type = container_type\n self.endpoint = '/api/v1/sections.json'\n self.specific_endpoint = '/api/v1/sections/{id}.json'\n\n\n# Level 2.3\nclass Element(Project):\n def __init__(self, token=None, container_id=None, id=None, data=None, element_type=None, container_type=None,\n experiment_id=None, **kwargs):\n Project.__init__(self, token, id, **kwargs)\n self.experiment_id = experiment_id\n self.container_id = container_id\n self.data = data\n self.element_type = element_type\n self.container_type = container_type\n self.endpoint = '/api/v1/elements.json'\n self.specific_endpoint = '/api/v1/elements/{id}.json'\n self.specific_endpoint_type = '/api/v1/experiments/{id}/elements.json'\n 
self.update_stock_amount_endpoint = '/api/v1/stocks/{id}/update_stock_amount'\n self.add_attachment_endpoint = '/api/v1/attachments/{id}'\n\n def update_element(self):\n response = self._get_or_update(endpoint=self.specific_endpoint, id=self.id, method='PUT', element=self.to_dict())\n return self.__class__(token=self.token, **response)\n\n def list_by_type(self):\n response = self._get_or_update(endpoint=self.specific_endpoint_type, id=self.experiment_id,\n element_type=self.element_type)\n if isinstance(response, list):\n return [self.__class__(token=self.token, **item) for item in response]\n else:\n return []\n\n def get_data(self):\n if self.element_type == 'form':\n return json.loads(self.description).get('form_json')\n\n elif self.element_type == 'samples':\n return json.loads(self.data).get('samples')\n\n elif self.element_type == 'plate':\n return json.loads(self.data).get('wells')\n\n else:\n return self.data\n\n\n def update_stock_amount(self, sample_id, stock_id, amount_used, unit_type, unit_type_name):\n if self.element_type == 'samples':\n response = self._get_or_update(endpoint=self.update_stock_amount_endpoint,\n id=stock_id,\n amount_used=amount_used,\n unit_type=unit_type,\n unit_type_name=unit_type_name,\n element_id=self.id,\n sample_id=sample_id,\n subtract='true',\n method='POST')\n return self.__class__(token=self.token, **response)\n else:\n return []\n\n def add_step(self, txt='', hours='00', minutes='00', seconds='00', completed_by=''):\n if self.element_type == 'steps':\n step = {\n \"title\": '' + txt + '
',\n \"timer\": {\n \"hours\": hours,\n \"minutes\": minutes,\n \"seconds\": seconds\n },\n \"completed\": True,\n \"completed_by\": completed_by,\n \"completed_at\": datetime.now().strftime(\"%d/%m/%Y %H:%M:%S\")\n }\n\n steps = json.loads(self.data)\n steps.append(step)\n data = json.dumps(steps)\n response = self._get_or_update(endpoint=self.specific_endpoint, id=self.id, method='PUT', data=data)\n return self.__class__(token=self.token, **response)\n else:\n return []\n\n def add_attachment(self, attachment_id):\n if self.element_type == 'attachments':\n response = self._get_or_update(endpoint=self.add_attachment_endpoint,\n id=attachment_id,\n item={'element_id': self.id},\n method='PUT')\n return self.__class__(token=self.token, **response)\n else:\n return []\n", "repo_name": "BioData/LabguruPython", "sub_path": "labguru/project.py", "file_name": "project.py", "file_ext": "py", "file_size_in_byte": 7090, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 14, "dataset": "github-code", "pt": "83", "api": [{"api_name": "response.Response", "line_number": 12, "usage_type": "name"}, {"api_name": "response.Response.__init__", "line_number": 14, "usage_type": "call"}, {"api_name": "response.Response", "line_number": 14, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 120, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 123, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 126, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 158, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 158, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 161, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 163, "usage_type": "call"}]}
+{"seq_id": "29696752160", "text": "import math\r\nimport pygame\r\n\r\n# Initialize Pygame\r\npygame.init()\r\n\r\n# Set the screen dimensions\r\nscreen_width = 800\r\nscreen_height = 600\r\n\r\n# Create the screen\r\nscreen = pygame.display.set_mode((screen_width, screen_height))\r\n\r\n# Set the title of the screen\r\npygame.display.set_caption(\"Two-Link Robot Simulation\")\r\n\r\n# Define the colors\r\nblack = (0, 0, 0)\r\nwhite = (255, 255, 255)\r\nred = (255, 0, 0)\r\ngreen = (0, 255, 0)\r\nblue = (0, 0, 255)\r\n\r\n# Define the link lengths\r\nl1 = 190.0\r\nl2 = 130.0\r\n\r\n# Define the initial angles of the two links\r\nhomepos1=-45.0\r\nhomepos2=-160.0\r\n\r\ntheta1 = homepos1*math.pi/180\r\ntheta2 = homepos2*math.pi/180\r\n\r\nitheta1limitl = -1.0\r\nitheta1limith = 160.0\r\nitheta2limitl = -1.0\r\nitheta2limith = 160.0\r\n\r\ntheta1limitl = math.pi*itheta1limitl/180\r\ntheta1limith = math.pi*itheta1limith/180\r\ntheta2limitl = math.pi*itheta2limitl/180\r\ntheta2limith = math.pi*itheta2limith/180\r\n\r\n\r\n# Define the position of the base of the robot\r\nx0 = screen_width / 2\r\ny0 = screen_height / 2\r\n\r\n# Define the position of the end-effector of the robot\r\nx_end = x0 + l1 * math.cos(theta1) + l2 * math.cos(theta1 + theta2)\r\ny_end = y0 + l1 * math.sin(theta1) + l2 * math.sin(theta1 + theta2)\r\n\r\n# Define the speed of the robot\r\nspeed = 4.9234123490871293874\r\n\r\n# Define the font for displaying the angles\r\nfont = pygame.font.SysFont(None, 25)\r\n\r\n# Define the clock\r\nclock = pygame.time.Clock()\r\n\r\n# Define the main loop of the program\r\n# Define the main loop of the program\r\nwhile True:\r\n # Clear the screen\r\n screen.fill(white)\r\n\r\n # Draw the base of the robot\r\n pygame.draw.circle(screen, black, (int(x0), int(y0)), 10)\r\n\r\n # Draw the first link of the robot\r\n pygame.draw.line(screen, black, (int(x0), int(y0)), (int(x0 + l1 * math.cos(theta1)), int(y0 + l1 * math.sin(theta1))), 5)\r\n\r\n # Draw the second link of the robot\r\n pygame.draw.line(screen, black, (int(x0 + l1 * math.cos(theta1)), int(y0 + l1 * math.sin(theta1))), (int(x_end), int(y_end)), 5)\r\n\r\n # Draw the end-effector of the robot\r\n pygame.draw.circle(screen, blue, (int(x_end), int(y_end)), 10)\r\n\r\n # Display the angles of the two links\r\n theta1_text = font.render(\"Theta1: {:.2f}\".format(math.degrees(theta1)), True, black)\r\n theta2_text = font.render(\"Theta2: {:.2f}\".format(math.degrees(theta2)), True, black)\r\n screen.blit(theta1_text, (10, 10))\r\n screen.blit(theta2_text, (10, 40))\r\n\r\n # Handle events\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n pygame.quit()\r\n quit()\r\n elif event.type == pygame.KEYDOWN:\r\n if event.key == pygame.K_ESCAPE:\r\n pygame.quit()\r\n quit()\r\n\r\n # Update the angles of the two links based on the pressed keys\r\n keys = pygame.key.get_pressed()\r\n\r\n if keys[pygame.K_LEFT] and keys[pygame.K_UP]:\r\n theta1 -= math.radians(speed)\r\n theta2 -= math.radians(speed)\r\n if theta1 < -theta1limith:\r\n theta1 = -theta1limith\r\n if theta2 < -theta2limith:\r\n theta2 = -theta2limith\r\n elif keys[pygame.K_LEFT] and keys[pygame.K_DOWN]:\r\n theta1 -= math.radians(speed)\r\n theta2 += math.radians(speed)\r\n if theta1 < -theta1limith:\r\n theta1 = -theta1limith\r\n if theta2 > theta2limitl:\r\n theta2 = theta2limitl\r\n elif keys[pygame.K_RIGHT] and keys[pygame.K_UP]:\r\n theta1 += math.radians(speed)\r\n theta2 -= math.radians(speed)\r\n if theta1 > theta1limitl:\r\n theta1 = 
theta1limitl\r\n if theta2 < -theta2limith:\r\n theta2 = -theta2limith\r\n elif keys[pygame.K_RIGHT] and keys[pygame.K_DOWN]:\r\n theta1 += math.radians(speed)\r\n theta2 += math.radians(speed)\r\n if theta1 > theta1limitl:\r\n theta1 = theta1limitl\r\n if theta2 > theta2limitl:\r\n theta2 = theta2limitl\r\n elif keys[pygame.K_LEFT] or keys[pygame.K_RIGHT]:\r\n if keys[pygame.K_LEFT]:\r\n theta1 -= math.radians(speed)\r\n else:\r\n theta1 += math.radians(speed)\r\n if theta1 < -theta1limith:\r\n theta1 = -theta1limith\r\n elif theta1 > theta1limitl:\r\n theta1 = theta1limitl\r\n elif keys[pygame.K_UP] or keys[pygame.K_DOWN]:\r\n if keys[pygame.K_UP]:\r\n theta2 -= math.radians(speed)\r\n else:\r\n theta2 += math.radians(speed)\r\n if theta2 < -theta2limith:\r\n theta2 = -theta2limith\r\n elif theta2 > theta2limitl:\r\n theta2 = theta2limitl\r\n\r\n # Update the position of the end\r\n x_end = x0 + l1 * math.cos(theta1) + l2 * math.cos(theta1 + theta2)\r\n y_end = y0 + l1 * math.sin(theta1) + l2 * math.sin(theta1 + theta2)\r\n # Update the screen\r\n pygame.display.update()\r\n\r\n # Set the FPS\r\n clock.tick(60)\r\n\r\npygame.quit()\r\n\r\n", "repo_name": "tykrus01/TARS", "sub_path": "SCARA_anglecontrol_arm.py", "file_name": "SCARA_anglecontrol_arm.py", "file_ext": "py", "file_size_in_byte": 4864, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "83", "api": [{"api_name": "pygame.init", "line_number": 5, "usage_type": "call"}, {"api_name": "pygame.display.set_mode", "line_number": 12, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 12, "usage_type": "attribute"}, {"api_name": "pygame.display.set_caption", "line_number": 15, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 15, "usage_type": "attribute"}, {"api_name": "math.pi", "line_number": 32, "usage_type": "attribute"}, {"api_name": "math.pi", "line_number": 33, "usage_type": "attribute"}, {"api_name": "math.pi", "line_number": 40, "usage_type": "attribute"}, {"api_name": "math.pi", "line_number": 41, "usage_type": "attribute"}, {"api_name": "math.pi", "line_number": 42, "usage_type": "attribute"}, {"api_name": "math.pi", "line_number": 43, "usage_type": "attribute"}, {"api_name": "math.cos", "line_number": 51, "usage_type": "call"}, {"api_name": "math.sin", "line_number": 52, "usage_type": "call"}, {"api_name": "pygame.font.SysFont", "line_number": 58, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 58, "usage_type": "attribute"}, {"api_name": "pygame.time.Clock", "line_number": 61, "usage_type": "call"}, {"api_name": "pygame.time", "line_number": 61, "usage_type": "attribute"}, {"api_name": "pygame.draw.circle", "line_number": 70, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 70, "usage_type": "attribute"}, {"api_name": "pygame.draw.line", "line_number": 73, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 73, "usage_type": "attribute"}, {"api_name": "math.cos", "line_number": 73, "usage_type": "call"}, {"api_name": "math.sin", "line_number": 73, "usage_type": "call"}, {"api_name": "pygame.draw.line", "line_number": 76, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 76, "usage_type": "attribute"}, {"api_name": "math.cos", "line_number": 76, "usage_type": "call"}, {"api_name": "math.sin", "line_number": 76, "usage_type": "call"}, {"api_name": "pygame.draw.circle", "line_number": 79, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 79, "usage_type": 
"attribute"}, {"api_name": "math.degrees", "line_number": 82, "usage_type": "call"}, {"api_name": "math.degrees", "line_number": 83, "usage_type": "call"}, {"api_name": "pygame.event.get", "line_number": 88, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 88, "usage_type": "attribute"}, {"api_name": "pygame.QUIT", "line_number": 89, "usage_type": "attribute"}, {"api_name": "pygame.quit", "line_number": 90, "usage_type": "call"}, {"api_name": "pygame.KEYDOWN", "line_number": 92, "usage_type": "attribute"}, {"api_name": "pygame.K_ESCAPE", "line_number": 93, "usage_type": "attribute"}, {"api_name": "pygame.quit", "line_number": 94, "usage_type": "call"}, {"api_name": "pygame.key.get_pressed", "line_number": 98, "usage_type": "call"}, {"api_name": "pygame.key", "line_number": 98, "usage_type": "attribute"}, {"api_name": "pygame.K_LEFT", "line_number": 100, "usage_type": "attribute"}, {"api_name": "pygame.K_UP", "line_number": 100, "usage_type": "attribute"}, {"api_name": "math.radians", "line_number": 101, "usage_type": "call"}, {"api_name": "math.radians", "line_number": 102, "usage_type": "call"}, {"api_name": "pygame.K_LEFT", "line_number": 107, "usage_type": "attribute"}, {"api_name": "pygame.K_DOWN", "line_number": 107, "usage_type": "attribute"}, {"api_name": "math.radians", "line_number": 108, "usage_type": "call"}, {"api_name": "math.radians", "line_number": 109, "usage_type": "call"}, {"api_name": "pygame.K_RIGHT", "line_number": 114, "usage_type": "attribute"}, {"api_name": "pygame.K_UP", "line_number": 114, "usage_type": "attribute"}, {"api_name": "math.radians", "line_number": 115, "usage_type": "call"}, {"api_name": "math.radians", "line_number": 116, "usage_type": "call"}, {"api_name": "pygame.K_RIGHT", "line_number": 121, "usage_type": "attribute"}, {"api_name": "pygame.K_DOWN", "line_number": 121, "usage_type": "attribute"}, {"api_name": "math.radians", "line_number": 122, "usage_type": "call"}, {"api_name": "math.radians", "line_number": 123, "usage_type": "call"}, {"api_name": "pygame.K_LEFT", "line_number": 128, "usage_type": "attribute"}, {"api_name": "pygame.K_RIGHT", "line_number": 128, "usage_type": "attribute"}, {"api_name": "pygame.K_LEFT", "line_number": 129, "usage_type": "attribute"}, {"api_name": "math.radians", "line_number": 130, "usage_type": "call"}, {"api_name": "math.radians", "line_number": 132, "usage_type": "call"}, {"api_name": "pygame.K_UP", "line_number": 137, "usage_type": "attribute"}, {"api_name": "pygame.K_DOWN", "line_number": 137, "usage_type": "attribute"}, {"api_name": "pygame.K_UP", "line_number": 138, "usage_type": "attribute"}, {"api_name": "math.radians", "line_number": 139, "usage_type": "call"}, {"api_name": "math.radians", "line_number": 141, "usage_type": "call"}, {"api_name": "math.cos", "line_number": 148, "usage_type": "call"}, {"api_name": "math.sin", "line_number": 149, "usage_type": "call"}, {"api_name": "pygame.display.update", "line_number": 151, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 151, "usage_type": "attribute"}, {"api_name": "pygame.quit", "line_number": 156, "usage_type": "call"}]}
+{"seq_id": "9697234232", "text": "import json\nfrom pathlib import Path\nimport argh\n\npositions_dir = Path(\"Manu-Data\") / \"Positions\"\nfit_dir = Path(\"Manu-Data\") / \"LineFit\"\n\ndef main():\n positions_paths = positions_dir.glob(\"*.json\")\n db = {}\n for path in positions_paths:\n with path.open() as f:\n data = json.load(f)\n position_id = path.stem\n\n # Zeroth, remove the big arrays that we have no need for here\n del data[\"mean\"]\n del data[\"std\"]\n del data[\"cont\"]\n del data[\"wavs\"]\n\n # First, get the per-position data such as x, y\n db[position_id] = data\n\n # Second, get the per-(position + line) data\n fit_subdir = fit_dir / position_id\n line_paths = fit_subdir.glob(\"*.json\")\n for linepath in line_paths:\n with linepath.open() as f:\n linedata = json.load(f)\n line_id = linepath.stem\n data[line_id] = linedata\n with open(\"manu_spectral_fit_db.json\", \"w\") as f:\n json.dump(db, f, indent=2)\n\n\n\nif __name__ == \"__main__\":\n argh.dispatch_command(main)\n", "repo_name": "will-henney/orion-tsq", "sub_path": "manu-photom-consolidate.py", "file_name": "manu-photom-consolidate.py", "file_ext": "py", "file_size_in_byte": 1083, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "83", "api": [{"api_name": "pathlib.Path", "line_number": 5, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 6, "usage_type": "call"}, {"api_name": "json.load", "line_number": 13, "usage_type": "call"}, {"api_name": "json.load", "line_number": 30, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 34, "usage_type": "call"}, {"api_name": "argh.dispatch_command", "line_number": 39, "usage_type": "call"}]}
+{"seq_id": "42579129606", "text": "from django.shortcuts import render\nfrom rest_framework.response import Response\nfrom rest_framework.decorators import api_view\nfrom django.http import HttpResponse\nimport base64\nfrom PIL import Image\nfrom io import BytesIO\nimport os\n\n# Image Classification Imports\nimport numpy as np\nfrom PIL import Image\nfrom imageio import imread\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\nimport tensorflow.compat.v1 as tf\ntf.disable_v2_behavior()\nimport tf_slim as slim\nfrom tf_slim.nets import inception\nimport tf_slim as slim\nimport cv2\n\ncurrent_path = os.path.join(os.getcwd(), 'inception_api')\nckpt_path = os.path.join(current_path, \"input/inception_v3.ckpt\")\nimages_path = os.path.join(current_path, \"images/*\")\nimg_width = 299\nimg_height = 299\nbatch_size = 16\nbatch_shape = [batch_size, img_height, img_width, 3]\nnum_classes = 1001\npredict_output = []\nclass_names_path = os.path.join(current_path, \"input/imagenet_class_names.txt\")\nwith open(class_names_path) as f:\n class_names = f.readlines()\n\n\n# To Load Image\ndef load_images(input_dir):\n global batch_shape\n images = np.zeros(batch_shape)\n filenames = []\n idx = 0\n batch_size = batch_shape[0]\n files = tf.gfile.Glob(input_dir)[:20]\n files.sort()\n for filepath in files:\n with tf.gfile.Open(filepath, \"rb\") as f:\n imgRaw = np.array(Image.fromarray(imread(f, as_gray=False, pilmode=\"RGB\")).resize((299, 299))).astype(np.float) / 255.0\n # Images for inception classifier are normalized to be in [-1, 1] interval.\n images[idx, :, :, :] = imgRaw * 2.0 - 1.0\n filenames.append(os.path.basename(filepath))\n idx += 1\n if idx == batch_size:\n yield filenames, images\n filenames = []\n images = np.zeros(batch_shape)\n idx = 0\n if idx > 0:\n yield filenames, images\n\n\ndef start_prediction():\n predict_output=[]\n return_arr = []\n\n X = tf.placeholder(tf.float32, shape=batch_shape)\n\n with slim.arg_scope(inception.inception_v3_arg_scope()):\n logits, end_points = inception.inception_v3(\n X, num_classes=num_classes, is_training=False, reuse=tf.AUTO_REUSE\n )\n\n predictions = end_points[\"Predictions\"]\n saver = tf.train.Saver(slim.get_model_variables())\n\n\n session_creator = tf.train.ChiefSessionCreator(\n scaffold=tf.train.Scaffold(saver=saver),\n checkpoint_filename_with_path=ckpt_path,\n master='')\n\n with tf.train.MonitoredSession(session_creator=session_creator) as sess:\n for filenames, images in load_images(images_path):\n labels = sess.run(predictions, feed_dict={X: images})\n for filename, label, image in zip(filenames, labels, images):\n predict_output.append([filename, label, image])\n \n for x in predict_output:\n out_list = list(x[1])\n topPredict = sorted(range(len(out_list)), key=lambda i: out_list[i], reverse=True)[:5]\n for p in topPredict:\n return_arr.append(class_names[p-1].strip())\n\n return return_arr\n\n\n\ndef base64_to_image(base64_string):\n # Convert base64 string to PIL Image\n path=os.path.join(os.getcwd(), 'inception_api', 'images', 'example.jpg')\n imgdata = base64.b64decode(base64_string)\n image = Image.open(BytesIO(imgdata))\n image.save(path)\n\n# Create your views here.\n@api_view(['POST'])\ndef get_classification(request):\n rarr = []\n # print(\"Got request- \", request.data)\n image = base64_to_image(request.data['image'])\n prediction_output = start_prediction()\n # print(\"prediction_output: \", prediction_output)\n \n for val in prediction_output:\n rarr.append(' '.join(val.split(' ')[1:]))\n\n # print(\"Rarray- \", rarr)\n return 
Response({'status': 'success', 'prediction': rarr})\n \ndef index(request):\n return HttpResponse(\"Hello, world. You're at the inception_api index.\")", "repo_name": "omkarshinde254/Inception_v3-Image-Classification", "sub_path": "mysite/inception_api/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 3872, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "73", "api": [{"api_name": "os.environ", "line_number": 14, "usage_type": "attribute"}, {"api_name": "tensorflow.compat.v1.disable_v2_behavior", "line_number": 16, "usage_type": "call"}, {"api_name": "tensorflow.compat.v1", "line_number": 16, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 22, "usage_type": "call"}, {"api_name": "os.path", "line_number": 22, "usage_type": "attribute"}, {"api_name": "os.getcwd", "line_number": 22, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 23, "usage_type": "call"}, {"api_name": "os.path", "line_number": 23, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 24, "usage_type": "call"}, {"api_name": "os.path", "line_number": 24, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 31, "usage_type": "call"}, {"api_name": "os.path", "line_number": 31, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 39, "usage_type": "call"}, {"api_name": "tensorflow.compat.v1.gfile.Glob", "line_number": 43, "usage_type": "call"}, {"api_name": "tensorflow.compat.v1.gfile", "line_number": 43, "usage_type": "attribute"}, {"api_name": "tensorflow.compat.v1", "line_number": 43, "usage_type": "name"}, {"api_name": "tensorflow.compat.v1.gfile.Open", "line_number": 46, "usage_type": "call"}, {"api_name": "tensorflow.compat.v1.gfile", "line_number": 46, "usage_type": "attribute"}, {"api_name": "tensorflow.compat.v1", "line_number": 46, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 47, "usage_type": "call"}, {"api_name": "PIL.Image.fromarray", "line_number": 47, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 47, "usage_type": "name"}, {"api_name": "imageio.imread", "line_number": 47, "usage_type": "call"}, {"api_name": "numpy.float", "line_number": 47, "usage_type": "attribute"}, {"api_name": "os.path.basename", "line_number": 50, "usage_type": "call"}, {"api_name": "os.path", "line_number": 50, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 55, "usage_type": "call"}, {"api_name": "tensorflow.compat.v1.placeholder", "line_number": 65, "usage_type": "call"}, {"api_name": "tensorflow.compat.v1", "line_number": 65, "usage_type": "name"}, {"api_name": "tensorflow.compat.v1.float32", "line_number": 65, "usage_type": "attribute"}, {"api_name": "tf_slim.arg_scope", "line_number": 67, "usage_type": "call"}, {"api_name": "tf_slim.nets.inception.inception_v3_arg_scope", "line_number": 67, "usage_type": "call"}, {"api_name": "tf_slim.nets.inception", "line_number": 67, "usage_type": "name"}, {"api_name": "tf_slim.nets.inception.inception_v3", "line_number": 68, "usage_type": "call"}, {"api_name": "tf_slim.nets.inception", "line_number": 68, "usage_type": "name"}, {"api_name": "tensorflow.compat.v1.AUTO_REUSE", "line_number": 69, "usage_type": "attribute"}, {"api_name": "tensorflow.compat.v1", "line_number": 69, "usage_type": "name"}, {"api_name": "tensorflow.compat.v1.train.Saver", "line_number": 73, "usage_type": "call"}, {"api_name": "tensorflow.compat.v1.train", "line_number": 73, "usage_type": 
"attribute"}, {"api_name": "tensorflow.compat.v1", "line_number": 73, "usage_type": "name"}, {"api_name": "tf_slim.get_model_variables", "line_number": 73, "usage_type": "call"}, {"api_name": "tensorflow.compat.v1.train.ChiefSessionCreator", "line_number": 76, "usage_type": "call"}, {"api_name": "tensorflow.compat.v1.train", "line_number": 76, "usage_type": "attribute"}, {"api_name": "tensorflow.compat.v1", "line_number": 76, "usage_type": "name"}, {"api_name": "tensorflow.compat.v1.train.Scaffold", "line_number": 77, "usage_type": "call"}, {"api_name": "tensorflow.compat.v1.train", "line_number": 77, "usage_type": "attribute"}, {"api_name": "tensorflow.compat.v1", "line_number": 77, "usage_type": "name"}, {"api_name": "tensorflow.compat.v1.train.MonitoredSession", "line_number": 81, "usage_type": "call"}, {"api_name": "tensorflow.compat.v1.train", "line_number": 81, "usage_type": "attribute"}, {"api_name": "tensorflow.compat.v1", "line_number": 81, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 99, "usage_type": "call"}, {"api_name": "os.path", "line_number": 99, "usage_type": "attribute"}, {"api_name": "os.getcwd", "line_number": 99, "usage_type": "call"}, {"api_name": "base64.b64decode", "line_number": 100, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 101, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 101, "usage_type": "name"}, {"api_name": "io.BytesIO", "line_number": 101, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 117, "usage_type": "call"}, {"api_name": "rest_framework.decorators.api_view", "line_number": 105, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 120, "usage_type": "call"}]}
+{"seq_id": "74552704557", "text": "import os\nimport xlsxwriter\n\ndef readFileName(wb, ws, path):\n\tfilenames = os.listdir(path)\n\tbold = wb.add_format({'bold': True})\n\tws.write('A1', 'Path', bold)\n\tws.write('B1', 'FileName', bold)\n\trow = 1\n\tcol = 0\n\n\tfor filename in filenames:\n\t\tfname, ext = os.path.splitext(filename)\n\t\tif ext.lower() in ['.jpg', '.jpeg','.png','.gif']:\n\t\t\tws.write(row, col, path)\n\t\t\tws.write(row, col+1, filename)\n\t\t\trow += 1\n\t\t#full_filename = os.path.join(path, filename)\n\t\t#print (full_filename)\n\tws.write(row, 0, 'Total', bold)\n\tws.write(row, 1, '=counta(B2:B'+str(row)+')', bold)\n\ndef main():\n\tfor i in os.listdir(os.getcwd()):\n\t\tif os.path.isdir(i):\n\t\t\trf = i + '.xlsx'\n\t\t\tprint(rf)\n\t\t\tworkbook = xlsxwriter.Workbook(rf)\n\t\t\tworksheet = workbook.add_worksheet()\n\t\t\treadFileName(workbook, worksheet, i)\n\t\t\tworkbook.close()\n\nif __name__ == '__main__':\n main()\n\n", "repo_name": "bullseye73/python", "sub_path": "fs/recognitionResult.py", "file_name": "recognitionResult.py", "file_ext": "py", "file_size_in_byte": 850, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "73", "api": [{"api_name": "os.listdir", "line_number": 5, "usage_type": "call"}, {"api_name": "os.path.splitext", "line_number": 13, "usage_type": "call"}, {"api_name": "os.path", "line_number": 13, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 24, "usage_type": "call"}, {"api_name": "os.getcwd", "line_number": 24, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 25, "usage_type": "call"}, {"api_name": "os.path", "line_number": 25, "usage_type": "attribute"}, {"api_name": "xlsxwriter.Workbook", "line_number": 28, "usage_type": "call"}]}
+{"seq_id": "31440116944", "text": "f\"\"\"\nWalnut Models\n\"\"\"\n\n###\n# Libraries\n###\nimport uuid\n\nfrom django.db import models\nfrom django.utils.translation import ugettext as _\n\n\n###\n# Models\n###\n\nclass VideoManager(models.Manager):\n\n def create(self, **data):\n if data.get('aws_credentials'):\n del data['aws_credentials']\n\n return super().create(**data)\n\nclass Video(models.Model):\n\n class StatusChoices(models.TextChoices):\n SUCCESS = 'Success', _('Success')\n FAILED = 'Failed', _('Failed')\n RUNNING = 'Running', _('Running')\n\n uuid = models.UUIDField(\n verbose_name=_('UUID'),\n default=uuid.uuid4,\n editable=False\n\n )\n\n title = models.CharField(\n verbose_name=_(\"Title\"),\n max_length=300,\n blank=True,\n null=True\n )\n\n description = models.TextField(\n verbose_name=_(\"Description\"),\n blank=True,\n null=True\n )\n\n video_source = models.URLField(\n verbose_name=_(\"Video \"),\n max_length=200\n )\n\n status = models.CharField(\n verbose_name=_(\"Status\"),\n max_length=10,\n choices=StatusChoices.choices,\n default=StatusChoices.RUNNING\n )\n\n user = models.ForeignKey(\n 'accounts.User',\n on_delete=models.CASCADE,\n blank=False,\n null=False\n )\n\n webhook_url = models.URLField(\n verbose_name=_(\"Webhook URL\"),\n max_length=200, \n null=False,\n blank=False\n )\n\n use_dash = models.BooleanField(\n verbose_name=_(\"Use Dash\"),\n default=False,\n null=False\n )\n\n dash_file = models.URLField(\n verbose_name=_(\"Dash file\"),\n max_length=200,\n null=True,\n blank=True,\n help_text=\"This field will be auto generated\"\n )\n\n use_hls = models.BooleanField(\n verbose_name=_(\"Use HLS\"),\n default=False,\n null=False\n )\n\n hls_file = models.URLField(\n verbose_name=_(\"HLS file\"),\n max_length=200,\n null=True,\n blank=True,\n help_text=\"This field will be auto generated\"\n )\n\n \n duration = models.FloatField(\n verbose_name=_(\"Duration\"),\n default=0,\n help_text=\"This field will be auto generated\"\n )\n\n objects = VideoManager()\n \n ", "repo_name": "MarlonCorreia/walnut", "sub_path": "walnut/models.py", "file_name": "models.py", "file_ext": "py", "file_size_in_byte": 2298, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "83", "api": [{"api_name": "django.db.models.Manager", "line_number": 18, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 18, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 26, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 26, "usage_type": "name"}, {"api_name": "django.db.models.TextChoices", "line_number": 28, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 28, "usage_type": "name"}, {"api_name": "django.utils.translation.ugettext", "line_number": 29, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext", "line_number": 30, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext", "line_number": 31, "usage_type": "call"}, {"api_name": "django.db.models.UUIDField", "line_number": 33, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 33, "usage_type": "name"}, {"api_name": "django.utils.translation.ugettext", "line_number": 34, "usage_type": "call"}, {"api_name": "uuid.uuid4", "line_number": 35, "usage_type": "attribute"}, {"api_name": "django.db.models.CharField", "line_number": 40, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 40, "usage_type": "name"}, 
{"api_name": "django.utils.translation.ugettext", "line_number": 41, "usage_type": "call"}, {"api_name": "django.db.models.TextField", "line_number": 47, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 47, "usage_type": "name"}, {"api_name": "django.utils.translation.ugettext", "line_number": 48, "usage_type": "call"}, {"api_name": "django.db.models.URLField", "line_number": 53, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 53, "usage_type": "name"}, {"api_name": "django.utils.translation.ugettext", "line_number": 54, "usage_type": "call"}, {"api_name": "django.db.models.CharField", "line_number": 58, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 58, "usage_type": "name"}, {"api_name": "django.utils.translation.ugettext", "line_number": 59, "usage_type": "call"}, {"api_name": "django.db.models.ForeignKey", "line_number": 65, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 65, "usage_type": "name"}, {"api_name": "django.db.models.CASCADE", "line_number": 67, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 67, "usage_type": "name"}, {"api_name": "django.db.models.URLField", "line_number": 72, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 72, "usage_type": "name"}, {"api_name": "django.utils.translation.ugettext", "line_number": 73, "usage_type": "call"}, {"api_name": "django.db.models.BooleanField", "line_number": 79, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 79, "usage_type": "name"}, {"api_name": "django.utils.translation.ugettext", "line_number": 80, "usage_type": "call"}, {"api_name": "django.db.models.URLField", "line_number": 85, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 85, "usage_type": "name"}, {"api_name": "django.utils.translation.ugettext", "line_number": 86, "usage_type": "call"}, {"api_name": "django.db.models.BooleanField", "line_number": 93, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 93, "usage_type": "name"}, {"api_name": "django.utils.translation.ugettext", "line_number": 94, "usage_type": "call"}, {"api_name": "django.db.models.URLField", "line_number": 99, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 99, "usage_type": "name"}, {"api_name": "django.utils.translation.ugettext", "line_number": 100, "usage_type": "call"}, {"api_name": "django.db.models.FloatField", "line_number": 108, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 108, "usage_type": "name"}, {"api_name": "django.utils.translation.ugettext", "line_number": 109, "usage_type": "call"}]}
+{"seq_id": "256956298", "text": "from collections import OrderedDict\nimport json\nimport inspect\n\nfrom . import base, fields\nfrom .generics import generic, method, call_next_method\n\ndef to_json(schema_family):\n return json.dumps(to_data(schema_family), indent=4, separators=(',', ': '))\n\n@generic\ndef to_data(obj, *args, **kwargs):\n pass\n\n@method(to_data)\ndef schema_family_to_data(family: base.SchemaFamily):\n data = OrderedDict()\n data['title'] = family.name\n data['$schema'] = \"http://json-schema.org/draft-04/schema#\"\n for schema in family.schemas.values():\n data[schema.__name__] = to_data(schema)\n return data\n\n\n@method(to_data)\ndef schema_to_data(schema: base.SchemaMeta):\n data = OrderedDict()\n data['title'] = schema.__name__\n data['description'] = inspect.getdoc(schema)\n for record in schema._meta.records.values():\n data[record.__name__] = to_data(record)\n return data\n\n@method(to_data)\ndef record_to_data(record: base.RecordMeta):\n data = OrderedDict()\n data['title'] = record.__name__\n data['type'] = \"object\"\n data['properties'] = OrderedDict( (field.__name__, to_data(field))\n for field in record._meta.fields.values() )\n\n links = [ to_link_description(link)\n for link in record._meta.fields.values()\n if isinstance(link, fields.Link) ]\n\n if len(links) > 0:\n data['links'] = links\n\n for child in record._meta.children.values():\n prop_name = getattr(child, 'collective_name', child.__name__)\n data['properties'][prop_name] = OrderedDict((\n ('type', 'array'), ('items', to_data(child))))\n\n return data\n\n@method(to_data)\ndef tree_record_to_data(tree: base.TreeMeta):\n data = call_next_method(tree)\n tree_structure = OrderedDict(((rank, {'type': 'string'})\n for rank in tree._ranks))\n data['properties']['tree_structure'] = OrderedDict((\n ('type', 'object'), ('properties', tree_structure)))\n\n return data\n\n@method(to_data)\ndef field_to_data(field: base.Field):\n return OrderedDict()\n\n@method(to_data)\ndef text_field_to_data(field: fields.Text):\n data = call_next_method(field)\n data[\"type\"] = \"string\"\n return data\n\n@method(to_data)\ndef date_field_to_data(field: fields.Date):\n data = call_next_method(field)\n data[\"type\"] = \"string\"\n data['format'] = \"date-time\"\n return data\n\n@method(to_data)\ndef integer_field_to_data(field: fields.Integer):\n data = call_next_method(field)\n data[\"type\"] = \"integer\"\n return data\n\n@method(to_data)\ndef boolean_field_to_data(field: fields.Boolean):\n data = call_next_method(field)\n data[\"type\"] = \"boolean\"\n return data\n\n@method(to_data)\ndef link_to_data(link: fields.Link):\n data = call_next_method(link)\n data[\"type\"] = \"string\"\n data[\"format\"] = \"uuid\"\n return data\n\ndef to_link_description(link):\n target_schema = \"#/%s/%s\" % (link.target._meta.schema.__name__,\n link.target.__name__)\n\n href = \"/%s/%s/{%s}/\" % (link.target._meta.schema.__name__,\n link.target.__name__,\n link.__name__)\n return OrderedDict((\n ('rel', link.__name__),\n ('href', href),\n ('targetSchema', target_schema)))\n", "repo_name": "benanhalt/SchemaTools", "sub_path": "egFish/specify/schema/to_json.py", "file_name": "to_json.py", "file_ext": "py", "file_size_in_byte": 3296, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "83", "api": [{"api_name": "json.dumps", "line_number": 9, "usage_type": "call"}, {"api_name": "generics.generic", "line_number": 11, "usage_type": "name"}, {"api_name": "collections.OrderedDict", 
"line_number": 17, "usage_type": "call"}, {"api_name": "generics.method", "line_number": 15, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 27, "usage_type": "call"}, {"api_name": "inspect.getdoc", "line_number": 29, "usage_type": "call"}, {"api_name": "generics.method", "line_number": 25, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 36, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 39, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 51, "usage_type": "call"}, {"api_name": "generics.method", "line_number": 34, "usage_type": "call"}, {"api_name": "generics.call_next_method", "line_number": 58, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 59, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 61, "usage_type": "call"}, {"api_name": "generics.method", "line_number": 56, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 68, "usage_type": "call"}, {"api_name": "generics.method", "line_number": 66, "usage_type": "call"}, {"api_name": "generics.call_next_method", "line_number": 72, "usage_type": "call"}, {"api_name": "generics.method", "line_number": 70, "usage_type": "call"}, {"api_name": "generics.call_next_method", "line_number": 78, "usage_type": "call"}, {"api_name": "generics.method", "line_number": 76, "usage_type": "call"}, {"api_name": "generics.call_next_method", "line_number": 85, "usage_type": "call"}, {"api_name": "generics.method", "line_number": 83, "usage_type": "call"}, {"api_name": "generics.call_next_method", "line_number": 91, "usage_type": "call"}, {"api_name": "generics.method", "line_number": 89, "usage_type": "call"}, {"api_name": "generics.call_next_method", "line_number": 97, "usage_type": "call"}, {"api_name": "generics.method", "line_number": 95, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 109, "usage_type": "call"}]}
+{"seq_id": "31773641981", "text": "###############################################################################\n##### Data Preprocesssing\n###############################################################################\nfrom typing import Tuple\nimport pandas as pd\nimport constants as cst\nfrom datetime import timedelta\nfrom functools import partial\nimport numpy as np\nfrom sklearn.model_selection import train_test_split\n\n##### Get logger\nimport logging\n\nlogger = logging.getLogger(__name__)\nlogger.addHandler(logging.StreamHandler())\nlogger.setLevel(logging.DEBUG)\n\n##### Preprocessor\nclass Preprocessor:\n def __init__(self, val_split_length: int = 90) -> None:\n self.split_day = cst.MAX_DATE - timedelta(val_split_length)\n\n def preprocess(\n self,\n data: pd.DataFrame = None,\n train_val_split: bool = True,\n ) -> pd.DataFrame:\n if train_val_split:\n data = self.enforce_data_quality(data)\n self.get_validation_set(data)\n\n # load train_data\n data = pd.read_csv(cst.TRAIN_TRANSACTION_PATH, index_col=0)\n data[\"date_order\"] = pd.to_datetime(data[\"date_order\"])\n data = data.sort_values(\"date_order\")\n\n data = self.feature_engineering(data)\n data.to_csv(cst.FEATURES_PATH)\n\n labels = self.get_labels(data)\n train_set, test_set = self.get_train_test_sets(\n pd.read_csv(cst.FEATURES_PATH, index_col=0), labels\n )\n\n return train_set, test_set\n\n def enforce_data_quality(self, data: pd.DataFrame) -> pd.DataFrame:\n # remove zero/negative quantities\n data = data[data[\"quantity\"] > 0]\n # remove zero/negative sales\n data = data[data[\"sales_net\"] > 0]\n # enforce date types\n data[\"date_order\"] = pd.to_datetime(data[\"date_order\"])\n data[\"date_invoice\"] = pd.to_datetime(data[\"date_invoice\"])\n\n data = data[\n (data[\"date_order\"].dt.year >= 2015)\n & (data[\"date_invoice\"].dt.year >= 2015)\n ]\n\n logger.info(\"Done performing data quality checks\")\n\n return data\n\n def get_validation_set(self, data: pd.DataFrame) -> None:\n train_set = data.loc[data[\"date_order\"] < self.split_day]\n val_set = data.loc[data[\"date_order\"] >= self.split_day]\n\n logger.info(\"Done splitting validation and train transactions\")\n\n train_set.to_csv(cst.TRAIN_TRANSACTION_PATH)\n val_set.to_csv(cst.VALIDATION_DATA_PATH)\n\n logger.info(\"Done saving validation and train transactions\")\n\n def feature_engineering(self, data: pd.DataFrame) -> pd.DataFrame:\n logger.info(\"Started feature engineering\")\n\n features = data.groupby(by=\"client_id\", as_index=False).agg(\n online_percent=(\"order_channel\", partial(item_freq, item=\"online\")),\n phone_percent=(\"order_channel\", partial(item_freq, item=\"by phone\")),\n store_percent=(\"order_channel\", partial(item_freq, item=\"at the store\")),\n visit_percent=(\n \"order_channel\",\n partial(item_freq, item=\"during the visit of a sales rep\"),\n ),\n other_percent=(\"order_channel\", partial(item_freq, item=\"other\")),\n mean_qty=(\"quantity\", \"mean\"),\n max_qty=(\"quantity\", \"max\"),\n min_qty=(\"quantity\", \"min\"),\n std_qty=(\"quantity\", \"std\"),\n last_qty_1=(\"quantity\", partial(last, n=1)),\n last_qty_2=(\"quantity\", partial(last, n=2)),\n last_qty_3=(\"quantity\", partial(last, n=3)),\n last_qty_4=(\"quantity\", partial(last, n=4)),\n mean_sales=(\"sales_net\", \"mean\"),\n max_sales=(\"sales_net\", \"max\"),\n min_sales=(\"sales_net\", \"min\"),\n std_sales=(\"sales_net\", \"std\"),\n last_sales_1=(\"sales_net\", partial(last, n=1)),\n last_sales_2=(\"sales_net\", partial(last, n=2)),\n 
last_sales_3=(\"sales_net\", partial(last, n=3)),\n last_sales_4=(\"sales_net\", partial(last, n=4)),\n n_branch=(\"branch_id\", \"nunique\"),\n n_product=(\"product_id\", \"nunique\"),\n purchase_freq=(\"date_order\", purchase_frequency),\n delay_purchase_n1=(\"date_order\", partial(time_delay, n=1)),\n delay_purchase_n2=(\"date_order\", partial(time_delay, n=2)),\n delay_purchase_n3=(\"date_order\", partial(time_delay, n=3)),\n delay_purchase_n4=(\"date_order\", partial(time_delay, n=4)),\n client_age=(\"date_order\", partial(time_from_today, n=0)),\n time_from_last_purchase=(\"date_order\", partial(time_from_today, n=-1)),\n client_lifetime=(\"date_order\", lifetime),\n )\n\n logger.info(\"Done feature engineering\")\n\n return features\n\n def get_labels(self, data: pd.DataFrame) -> pd.DataFrame:\n # get purchasing frequencies\n freqs = data.groupby(by=\"client_id\", as_index=False).agg(\n last_purchase=(\"date_order\", \"max\"),\n frequency=(\"date_order\", purchase_frequency),\n )\n\n # setting churn deffinitions\n freq_churn = self.split_day - timedelta(30)\n medium_churn = self.split_day - timedelta(90)\n\n # assiging client categories\n freqs.loc[\n (freqs[\"frequency\"] > 0) & (freqs[\"frequency\"] < 10), \"client_category\"\n ] = \"freq_buyer\"\n freqs.loc[\n (freqs[\"frequency\"] >= 10) & (freqs[\"frequency\"] < 32), \"client_category\"\n ] = \"med_buyer\"\n freqs.loc[\n (freqs[\"frequency\"] >= 32) | (freqs[\"frequency\"] == 0), \"client_category\"\n ] = \"infreq_buyer\"\n\n # assigning labels\n freqs.loc[\n (freqs[\"client_category\"] == \"freq_buyer\")\n & (freqs[\"last_purchase\"] <= freq_churn),\n \"is_churn\",\n ] = 1\n freqs.loc[\n (freqs[\"client_category\"] == \"freq_buyer\")\n & (freqs[\"last_purchase\"] > freq_churn),\n \"is_churn\",\n ] = 0\n freqs.loc[\n (freqs[\"client_category\"] == \"med_buyer\")\n & (freqs[\"last_purchase\"] <= medium_churn),\n \"is_churn\",\n ] = 1\n freqs.loc[\n (freqs[\"client_category\"] == \"med_buyer\")\n & (freqs[\"last_purchase\"] > medium_churn),\n \"is_churn\",\n ] = 0\n\n logger.info(\"Done getting labels\")\n\n freqs.to_csv(cst.TRAIN_TARGET_PATH)\n\n return freqs\n\n def get_train_test_sets(\n self, training_data: pd.DataFrame, training_target: pd.DataFrame\n ) -> Tuple[pd.DataFrame, pd.DataFrame]:\n full_training_data = pd.merge(\n training_data, training_target, on=\"client_id\", how=\"left\"\n )\n full_training_data.dropna(inplace=True)\n train_set, test_set = train_test_split(\n full_training_data, test_size=0.3, random_state=42\n )\n\n logger.info(\"Done splitting train and test set\")\n\n train_set.to_css(cst.TRAIN_SET_PATH)\n test_set.to_css(cst.TEST_SET_PATH)\n\n logger.info(\"Done saving train and test set\")\n\n return train_set, test_set\n\n\ndef purchase_frequency(dates):\n return pd.Timedelta(np.diff(dates.unique()).mean()).total_seconds() / (60 * 60 * 24)\n\n\ndef time_delay(dates, n=1):\n try:\n return pd.Timedelta(dates.iloc[-n] - dates.iloc[-n - 1]).total_seconds() / (\n 60 * 60 * 24\n )\n except IndexError:\n return np.nan\n\n\ndef lifetime(dates):\n return pd.Timedelta(dates.iloc[-1] - dates.iloc[0]).total_seconds() / (60 * 60 * 24)\n\n\ndef time_from_today(dates, n=0):\n return pd.Timedelta(cst.MAX_DATE - dates.iloc[n]).total_seconds() / (60 * 60 * 24)\n\n\ndef last(sequence, n=1):\n try:\n return sequence.iloc[-n]\n except IndexError:\n return np.nan\n\n\ndef item_freq(series, item):\n if not np.isin(item, series):\n return 0\n else:\n return series.value_counts(normalize=True)[item]\n", "repo_name": 
"henrique-britoleao/churn_prediction", "sub_path": "src/preprocessing.py", "file_name": "preprocessing.py", "file_ext": "py", "file_size_in_byte": 7883, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "83", "api": [{"api_name": "logging.getLogger", "line_number": 15, "usage_type": "call"}, {"api_name": "logging.StreamHandler", "line_number": 16, "usage_type": "call"}, {"api_name": "logging.DEBUG", "line_number": 17, "usage_type": "attribute"}, {"api_name": "constants.MAX_DATE", "line_number": 22, "usage_type": "attribute"}, {"api_name": "datetime.timedelta", "line_number": 22, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 26, "usage_type": "attribute"}, {"api_name": "pandas.read_csv", "line_number": 34, "usage_type": "call"}, {"api_name": "constants.TRAIN_TRANSACTION_PATH", "line_number": 34, "usage_type": "attribute"}, {"api_name": "pandas.to_datetime", "line_number": 35, "usage_type": "call"}, {"api_name": "constants.FEATURES_PATH", "line_number": 39, "usage_type": "attribute"}, {"api_name": "pandas.read_csv", "line_number": 43, "usage_type": "call"}, {"api_name": "constants.FEATURES_PATH", "line_number": 43, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 28, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 48, "usage_type": "attribute"}, {"api_name": "pandas.to_datetime", "line_number": 54, "usage_type": "call"}, {"api_name": "pandas.to_datetime", "line_number": 55, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 66, "usage_type": "attribute"}, {"api_name": "constants.TRAIN_TRANSACTION_PATH", "line_number": 72, "usage_type": "attribute"}, {"api_name": "constants.VALIDATION_DATA_PATH", "line_number": 73, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 77, "usage_type": "attribute"}, {"api_name": "functools.partial", "line_number": 81, "usage_type": "call"}, {"api_name": "functools.partial", "line_number": 82, "usage_type": "call"}, {"api_name": "functools.partial", "line_number": 83, "usage_type": "call"}, {"api_name": "functools.partial", "line_number": 86, "usage_type": "call"}, {"api_name": "functools.partial", "line_number": 88, "usage_type": "call"}, {"api_name": "functools.partial", "line_number": 93, "usage_type": "call"}, {"api_name": "functools.partial", "line_number": 94, "usage_type": "call"}, {"api_name": "functools.partial", "line_number": 95, "usage_type": "call"}, {"api_name": "functools.partial", "line_number": 96, "usage_type": "call"}, {"api_name": "functools.partial", "line_number": 101, "usage_type": "call"}, {"api_name": "functools.partial", "line_number": 102, "usage_type": "call"}, {"api_name": "functools.partial", "line_number": 103, "usage_type": "call"}, {"api_name": "functools.partial", "line_number": 104, "usage_type": "call"}, {"api_name": "functools.partial", "line_number": 108, "usage_type": "call"}, {"api_name": "functools.partial", "line_number": 109, "usage_type": "call"}, {"api_name": "functools.partial", "line_number": 110, "usage_type": "call"}, {"api_name": "functools.partial", "line_number": 111, "usage_type": "call"}, {"api_name": "functools.partial", "line_number": 112, "usage_type": "call"}, {"api_name": "functools.partial", "line_number": 113, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 121, "usage_type": "attribute"}, {"api_name": "datetime.timedelta", "line_number": 129, "usage_type": "call"}, {"api_name": "datetime.timedelta", 
"line_number": 130, "usage_type": "call"}, {"api_name": "constants.TRAIN_TARGET_PATH", "line_number": 167, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 172, "usage_type": "attribute"}, {"api_name": "pandas.merge", "line_number": 174, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 178, "usage_type": "call"}, {"api_name": "constants.TRAIN_SET_PATH", "line_number": 184, "usage_type": "attribute"}, {"api_name": "constants.TEST_SET_PATH", "line_number": 185, "usage_type": "attribute"}, {"api_name": "typing.Tuple", "line_number": 173, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 173, "usage_type": "attribute"}, {"api_name": "pandas.Timedelta", "line_number": 193, "usage_type": "call"}, {"api_name": "numpy.diff", "line_number": 193, "usage_type": "call"}, {"api_name": "pandas.Timedelta", "line_number": 198, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 202, "usage_type": "attribute"}, {"api_name": "pandas.Timedelta", "line_number": 206, "usage_type": "call"}, {"api_name": "pandas.Timedelta", "line_number": 210, "usage_type": "call"}, {"api_name": "constants.MAX_DATE", "line_number": 210, "usage_type": "attribute"}, {"api_name": "numpy.nan", "line_number": 217, "usage_type": "attribute"}, {"api_name": "numpy.isin", "line_number": 221, "usage_type": "call"}]}
+{"seq_id": "18212208455", "text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Jan 14 16:02:52 2020\r\n\r\n@author: Harry\r\n\"\"\"\r\n# 1 remeber me \r\nimport json \r\n#如果以前储存了用户名,就加载它\r\n#否则就提示用户输入用户名并储存它\r\nfilename = 'username.json'\r\ntry:\r\n with open(filename) as f_obj:\r\n username = json.load(f_obj)\r\nexcept FileNotFoundError:\r\n username = input(\"What is your name? \")\r\n with open(filename,'w') as f_obj:\r\n json.dump(username,f_obj)\r\n print(\"We will remeber you when you come back, \" +username + \" !\")\r\nelse:\r\n print(\"Welcome back, \"+ username+ '!')\r\n\r\nprint()\r\n# 2 重构\r\nimport json\r\n\r\ndef get_stored_username():\r\n #如果储存了用户名,就获取它\r\n filename = 'username1.json'\r\n try: \r\n with open(filename) as f_obj:\r\n username = json.load(f_obj)\r\n except FileNotFoundError:\r\n return None\r\n else:\r\n return username\r\n \r\n \r\ndef get_new_username():\r\n #提示用户输入用户名\r\n username = input(\"What is your name? \")\r\n filename = 'username1.json'\r\n with open(filename,'w') as f_obj:\r\n json.dump(username,f_obj)\r\n return username\r\n\r\n\r\ndef greet_user():\r\n #问候用户,并指出其名字\r\n username = get_stored_username()\r\n if username:\r\n user_state = input(\"Are you \"+ username+ \"?(Y/N)\")\r\n if user_state =='Y':\r\n print(\"welcome back \"+username+ \"!\")\r\n else:\r\n username = get_new_username()\r\n print(\"We will remeber you when you come back, \"+ username+ '!')\r\n else:\r\n username = get_new_username()\r\n print(\"We will remeber you when you come back, \"+ username+ '!')\r\ngreet_user()\r\n ", "repo_name": "sunstriderLHT/python-start", "sub_path": "chapter 10/remeber_me.py", "file_name": "remeber_me.py", "file_ext": "py", "file_size_in_byte": 1712, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "73", "api": [{"api_name": "json.load", "line_number": 14, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 18, "usage_type": "call"}, {"api_name": "json.load", "line_number": 32, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 44, "usage_type": "call"}]}
+{"seq_id": "23858547974", "text": "import redis\nimport json\n\n\n\nr = redis.StrictRedis(host=\"192.168.1.7\", port=6379, db=0)\ndef add_redis():\n r = redis.Redis(host=\"192.168.1.7\", port=6379, password=\"123\", db=0)\n with r.pipeline(transaction=False) as p:\n\n for value in range(1,13000):\n # print(value)\n mapping = {value:value}\n r.zadd(\"Pp-RelationStoreFans_961948\", mapping)\n p.execute()\n\n\n\ndef get_redis_token(userId):\n token_redis = r.get('pp-user-test:token:app_user:' + userId)\n if token_redis is not None:\n print(type(token_redis))\n print(token_redis)\n str_token = str(token_redis, encoding=\"utf8\")\n dict_token = json.loads(str_token)\n\n token = dict_token[\"token\"]\n print(token)\n return token\n else:\n\n print(\"获取的token没有数据\")\n exit()\n\n\n\n\nif __name__ == '__main__':\n # add_redis()\n userId = input(\"输入userId:\")\n\n get_redis_token(userId)\n\n", "repo_name": "So777888/openstore", "sub_path": "get_redis_token.py", "file_name": "get_redis_token.py", "file_ext": "py", "file_size_in_byte": 952, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "73", "api": [{"api_name": "redis.StrictRedis", "line_number": 6, "usage_type": "call"}, {"api_name": "redis.Redis", "line_number": 8, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 25, "usage_type": "call"}]}
+{"seq_id": "429471116", "text": "import requests\nfrom bs4 import BeautifulSoup\nimport pandas as pd\n\n\ndef extract(city, page):\n '''Extracting information for jobs city-wise'''\n headers = {\n 'User-Agent':\n 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:96.0) Gecko/20100101 Firefox/96.0'\n }\n url = f'https://jobs.accaglobal.com/jobs/{city}/{page}'\n r = requests.get(url, headers)\n soup = BeautifulSoup(r.content, 'html.parser')\n return soup\n\n\ndef transform(soup):\n '''Extracting relevant information from the soup object'''\n divs = soup.find_all('div', class_=\"lister__details cf js-clickable\")\n for block in divs:\n title = block.find_all('span')[0].text\n location = block.find_all(\n class_=\"lister__meta-item lister__meta-item--location\")[0].text\n salary = block.find_all(\n class_=\"lister__meta-item lister__meta-item--salary\")[0].text\n recuriter = block.find_all(\n class_=\"lister__meta-item lister__meta-item--recruiter\")[0].text\n a = [\n x['href'].strip()\n for x in block.find_all(class_='js-clickable-area-link', href=True)\n ]\n url = 'https://jobs.accaglobal.com' + str(a).replace(\"['\", '').replace(\n \"']\", '')\n job = {\n 'Title': title,\n 'Location': location,\n 'Salary': salary,\n 'Recuriter': recuriter,\n 'Link': url\n }\n joblist.append(job)\n\n\n# Create and empty job list\njoblist = []\n\n# Creating a writer object for writing data in multiple sheets\nwriter = pd.ExcelWriter('ACCA_jobs.xlsx', engine='xlsxwriter')\nfor city in ['karachi', 'lahore', 'islamabad']:\n for i in range(0, 10):\n print(f'Getting {city.capitalize()} jobs at page, {i}')\n c = extract(city, i)\n transform(c)\ndf = pd.DataFrame(joblist).drop_duplicates()\ndf.to_excel(writer, sheet_name='ACCA_Jobs', index=False)\nwriter.close()\n", "repo_name": "kashifnaz/ACCA_Jobs_Web_Scraping", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 1797, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "73", "api": [{"api_name": "requests.get", "line_number": 13, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 14, "usage_type": "call"}, {"api_name": "pandas.ExcelWriter", "line_number": 49, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 55, "usage_type": "call"}]}
+{"seq_id": "22658617051", "text": "import os\nimport json\nimport parquet\nimport pandas as pd\n\nhome = os.path.expanduser(\"~\")\ndir = \"/media/sumeyer/SSD_2/ML_DATA/\"\nfilename = \"part-r-00000-67ebd6f0-bfb4-42e0-b516-d7aaa77cbcb8.snappy.parquet\"\ndatafile = dir + filename\n\nprint(\"open file : \", datafile)\n\n\n## assuming parquet file with two rows and three columns:\n## foo bar baz\n## 1 2 3\n## 4 5 6\n\nwith open(datafile) as fo:\n # prints:\n # {\"foo\": 1, \"bar\": 2}\n # {\"foo\": 4, \"bar\": 5}\n for row in parquet.DictReader(fo):\n print(json.dumps(row))\n\n\nwith open(datafile) as fo:\n # prints:\n # 1,2\n # 4,5\n for row in parquet.reader(fo):\n print(\",\".join([str(r) for r in row]))\n\nprint(df.info())\nprint(df)", "repo_name": "SvenMeyer/keras", "sub_path": "import_parquet.py", "file_name": "import_parquet.py", "file_ext": "py", "file_size_in_byte": 694, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "73", "api": [{"api_name": "os.path.expanduser", "line_number": 6, "usage_type": "call"}, {"api_name": "os.path", "line_number": 6, "usage_type": "attribute"}, {"api_name": "parquet.DictReader", "line_number": 23, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 24, "usage_type": "call"}, {"api_name": "parquet.reader", "line_number": 31, "usage_type": "call"}]}
+{"seq_id": "11482726919", "text": "def calculate_total_price(N, M, C, D, P):\r\n total_price = 0 # 合計金額を0で初期化\r\n\r\n # 高橋くんが食べた各皿について\r\n for i in range(N):\r\n # その皿の色が価格が定義されている色かどうかを確認\r\n if C[i] in D:\r\n # 定義されている色ならその価格を合計に加える\r\n color_index = D.index(C[i])\r\n total_price += P[color_index + 1] # P[0]は定義されていない色なので +1 する\r\n else:\r\n # 定義されていない色ならP[0]を合計に加える\r\n total_price += P[0]\r\n\r\n return total_price\r\n\r\n\r\n\"\"\"\r\nphp\r\n\r\n\r\n\"\"\"\r\n\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\nf = 800 \r\nhm = 1.5 \r\nhb = [30, 120, 180] \r\nd = np.linspace(1, 10, 100)\r\n\r\ndef okumura_hata(d, f, hb, hm):\r\n ahr = (1.1 * np.log10(f) - 0.7) * hm - (1.56 * np.log10(f) - 0.8)\r\n return 69.55 + 26.16 * np.log10(f) - 13.82 * np.log10(hb) + (44.9 - 6.55 * np.log10(hb)) * np.log10(d) - ahr\r\n\r\n# Calculate loss for each base station antenna height\r\nl = [okumura_hata(d, f, h, hm) for h in hb]\r\n\r\n# Plot\r\nplt.figure(figsize=(10, 6))\r\nfor i, loss in enumerate(l):\r\n plt.plot(d, loss, label=f'hb = {hb[i]} m')\r\nplt.xscale('log')\r\nplt.xlabel('d (km)')\r\nplt.ylabel('Loss (dB)')\r\nplt.title('Loss : d Okumura-Hata')\r\nplt.legend()\r\nplt.grid(True)\r\nplt.show()\r\n", "repo_name": "s1f102103189/Atcorder", "sub_path": "temp/Default Price.py", "file_name": "Default Price.py", "file_ext": "py", "file_size_in_byte": 2141, "program_lang": "python", "lang": "ja", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "73", "api": [{"api_name": "numpy.linspace", "line_number": 51, "usage_type": "call"}, {"api_name": "numpy.log10", "line_number": 54, "usage_type": "call"}, {"api_name": "numpy.log10", "line_number": 55, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 61, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 61, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 63, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 63, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xscale", "line_number": 64, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 64, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 65, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 65, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 66, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 66, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 67, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 67, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 68, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 68, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 69, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 69, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 70, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 70, "usage_type": "name"}]}
+{"seq_id": "7424461184", "text": "from io import StringIO\nfrom unittest import mock\n\nimport pytest\nfrom django.core.management import CommandError, call_command\n\nfrom ansible_base.models import Authenticator, AuthenticatorUser\n\n\n@pytest.mark.parametrize(\n \"command_args\",\n [None, \"--list\"],\n)\ndef test_authenticators_cli_list_with_tabulate(command_args, local_authenticator, ldap_authenticator):\n \"\"\"\n When we have tabulate, we have to parse a fancy table.\n\n Ensure that table contains the authenticators we expect.\n \"\"\"\n out = StringIO()\n err = StringIO()\n\n if command_args is None:\n call_command('authenticators', stdout=out, stderr=err)\n else:\n call_command('authenticators', command_args, stdout=out, stderr=err)\n\n lines = out.getvalue().strip().splitlines()\n headers = (\"ID\", \"Enabled\", \"Name\", \"Order\")\n\n for header in headers:\n assert header in lines[0]\n\n for line, authenticator in ((2, local_authenticator), (3, ldap_authenticator)):\n auth_line = lines[line]\n auth_line = auth_line.strip('|')\n (auth_id, enabled, name, order) = auth_line.split(' | ')\n\n assert auth_id.strip() == str(authenticator.id)\n assert enabled.strip() == str(authenticator.enabled)\n assert name.strip() == str(authenticator.name)\n assert order.strip() == str(authenticator.order)\n\n\n@pytest.mark.parametrize(\n \"command_args\",\n [None, \"--list\"],\n)\n@mock.patch(\"ansible_base.management.commands.authenticators.HAS_TABULATE\", False)\ndef test_authenticators_cli_list_without_tabulate(command_args, local_authenticator, ldap_authenticator):\n \"\"\"\n When we don't have tabulate, we have to parse a simple table.\n\n Ensure that table contains the authenticators we expect.\n \"\"\"\n out = StringIO()\n err = StringIO()\n\n if command_args is None:\n call_command('authenticators', stdout=out, stderr=err)\n else:\n call_command('authenticators', command_args, stdout=out, stderr=err)\n\n lines = out.getvalue().strip().splitlines()\n headers = (\"ID\", \"Enabled\", \"Name\", \"Order\")\n\n for header in headers:\n assert header in lines[0]\n\n for line, authenticator in ((1, local_authenticator), (2, ldap_authenticator)):\n auth_line = lines[line]\n (auth_id, enabled, name, order) = auth_line.split('\\t')\n\n assert auth_id.strip() == str(authenticator.id)\n assert enabled.strip() == str(authenticator.enabled)\n assert name.strip() == str(authenticator.name)\n assert order.strip() == str(authenticator.order)\n\n\ndef test_authenticators_cli_initialize(django_user_model):\n \"\"\"\n Calling with --initialize will create:\n - An authenticator if there is an admin user\n \"\"\"\n out = StringIO()\n err = StringIO()\n\n # Sanity check:\n assert django_user_model.objects.count() == 0\n\n with pytest.raises(SystemExit) as pytest_wrapped_e:\n call_command('authenticators', \"--initialize\", stdout=out, stderr=err)\n assert pytest_wrapped_e.type == SystemExit\n assert pytest_wrapped_e.value.code == 255\n assert \"No admin user exists\" in err.getvalue()\n\n django_user_model.objects.create(username=\"admin\")\n call_command('authenticators', \"--initialize\", stdout=out, stderr=err)\n assert \"Created default local authenticator\" in out.getvalue()\n\n\ndef test_authenticators_cli_initialize_pre_existing(django_user_model, local_authenticator, admin_user):\n \"\"\"\n What if we already have an admin user?\n\n In this case, the command should do nothing on --initialize.\n \"\"\"\n out = StringIO()\n err = StringIO()\n\n # Sanity check:\n assert django_user_model.objects.count() 
== 1\n existing_user = django_user_model.objects.first()\n assert AuthenticatorUser.objects.count() == 0\n\n call_command('authenticators', \"--initialize\", stdout=out, stderr=err)\n\n # Make sure no new user got created.\n assert django_user_model.objects.count() == 1\n assert django_user_model.objects.filter(username=\"admin\").count() == 1\n new_user = django_user_model.objects.first()\n\n # Nothing should have changed\n assert existing_user == new_user\n assert existing_user.date_joined == new_user.date_joined\n assert out.getvalue() == \"\"\n assert err.getvalue() == \"\"\n\n # No AuthenticatorUser should get created in this case\n assert AuthenticatorUser.objects.count() == 0\n\n\n@pytest.mark.parametrize(\n \"start_state, flag, end_state, exp_out, exp_err\",\n [\n pytest.param(False, \"--enable\", True, \"\", \"\", id=\"disabled -> enabled\"),\n pytest.param(False, \"--disable\", False, \"\", \"\", id=\"disabled -> disabled\"),\n pytest.param(True, \"--enable\", True, \"\", \"\", id=\"enabled -> enabled\"),\n pytest.param(True, \"--disable\", False, \"\", \"\", id=\"enabled -> disabled\"),\n ],\n)\ndef test_authenticators_cli_enable_disable(local_authenticator, start_state, flag, end_state, exp_out, exp_err):\n \"\"\"\n Test enabling/disabling an authenticator.\n \"\"\"\n local_authenticator.enabled = start_state\n local_authenticator.save()\n\n out = StringIO()\n err = StringIO()\n\n assert Authenticator.objects.get(id=local_authenticator.id).enabled == start_state\n call_command('authenticators', flag, local_authenticator.id, stdout=out, stderr=err)\n assert Authenticator.objects.get(id=local_authenticator.id).enabled == end_state\n\n assert out.getvalue() == exp_out\n assert err.getvalue() == exp_err\n\n\n@pytest.mark.parametrize(\n \"flag\",\n [\"--enable\", \"--disable\"],\n)\n@pytest.mark.django_db\ndef test_authenticators_cli_enable_disable_nonexisting(flag):\n \"\"\"\n Test enabling/disabling a non-existing authenticator.\n \"\"\"\n\n out = StringIO()\n err = StringIO()\n\n with pytest.raises(CommandError) as e:\n call_command('authenticators', flag, 1337, stdout=out, stderr=err)\n\n assert \"Authenticator 1337 does not exist\" in str(e.value)\n", "repo_name": "ansible/django-ansible-base", "sub_path": "ansible_base/tests/management/test_authenticators.py", "file_name": "test_authenticators.py", "file_ext": "py", "file_size_in_byte": 5860, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 4, "dataset": "github-code", "pt": "73", "api": [{"api_name": "io.StringIO", "line_number": 20, "usage_type": "call"}, {"api_name": "io.StringIO", "line_number": 21, "usage_type": "call"}, {"api_name": "django.core.management.call_command", "line_number": 24, "usage_type": "call"}, {"api_name": "django.core.management.call_command", "line_number": 26, "usage_type": "call"}, {"api_name": "pytest.mark.parametrize", "line_number": 10, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 10, "usage_type": "attribute"}, {"api_name": "io.StringIO", "line_number": 56, "usage_type": "call"}, {"api_name": "io.StringIO", "line_number": 57, "usage_type": "call"}, {"api_name": "django.core.management.call_command", "line_number": 60, "usage_type": "call"}, {"api_name": "django.core.management.call_command", "line_number": 62, "usage_type": "call"}, {"api_name": "pytest.mark.parametrize", "line_number": 45, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 45, "usage_type": "attribute"}, {"api_name": "unittest.mock.patch", "line_number": 49, "usage_type": 
"call"}, {"api_name": "unittest.mock", "line_number": 49, "usage_type": "name"}, {"api_name": "io.StringIO", "line_number": 85, "usage_type": "call"}, {"api_name": "io.StringIO", "line_number": 86, "usage_type": "call"}, {"api_name": "pytest.raises", "line_number": 91, "usage_type": "call"}, {"api_name": "django.core.management.call_command", "line_number": 92, "usage_type": "call"}, {"api_name": "django.core.management.call_command", "line_number": 98, "usage_type": "call"}, {"api_name": "io.StringIO", "line_number": 108, "usage_type": "call"}, {"api_name": "io.StringIO", "line_number": 109, "usage_type": "call"}, {"api_name": "ansible_base.models.AuthenticatorUser.objects.count", "line_number": 114, "usage_type": "call"}, {"api_name": "ansible_base.models.AuthenticatorUser.objects", "line_number": 114, "usage_type": "attribute"}, {"api_name": "ansible_base.models.AuthenticatorUser", "line_number": 114, "usage_type": "name"}, {"api_name": "django.core.management.call_command", "line_number": 116, "usage_type": "call"}, {"api_name": "ansible_base.models.AuthenticatorUser.objects.count", "line_number": 130, "usage_type": "call"}, {"api_name": "ansible_base.models.AuthenticatorUser.objects", "line_number": 130, "usage_type": "attribute"}, {"api_name": "ansible_base.models.AuthenticatorUser", "line_number": 130, "usage_type": "name"}, {"api_name": "io.StringIO", "line_number": 149, "usage_type": "call"}, {"api_name": "io.StringIO", "line_number": 150, "usage_type": "call"}, {"api_name": "ansible_base.models.Authenticator.objects.get", "line_number": 152, "usage_type": "call"}, {"api_name": "ansible_base.models.Authenticator.objects", "line_number": 152, "usage_type": "attribute"}, {"api_name": "ansible_base.models.Authenticator", "line_number": 152, "usage_type": "name"}, {"api_name": "django.core.management.call_command", "line_number": 153, "usage_type": "call"}, {"api_name": "ansible_base.models.Authenticator.objects.get", "line_number": 154, "usage_type": "call"}, {"api_name": "ansible_base.models.Authenticator.objects", "line_number": 154, "usage_type": "attribute"}, {"api_name": "ansible_base.models.Authenticator", "line_number": 154, "usage_type": "name"}, {"api_name": "pytest.mark.parametrize", "line_number": 133, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 133, "usage_type": "attribute"}, {"api_name": "pytest.param", "line_number": 136, "usage_type": "call"}, {"api_name": "pytest.param", "line_number": 137, "usage_type": "call"}, {"api_name": "pytest.param", "line_number": 138, "usage_type": "call"}, {"api_name": "pytest.param", "line_number": 139, "usage_type": "call"}, {"api_name": "io.StringIO", "line_number": 170, "usage_type": "call"}, {"api_name": "io.StringIO", "line_number": 171, "usage_type": "call"}, {"api_name": "pytest.raises", "line_number": 173, "usage_type": "call"}, {"api_name": "django.core.management.CommandError", "line_number": 173, "usage_type": "argument"}, {"api_name": "django.core.management.call_command", "line_number": 174, "usage_type": "call"}, {"api_name": "pytest.mark.parametrize", "line_number": 160, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 160, "usage_type": "attribute"}, {"api_name": "pytest.mark", "line_number": 164, "usage_type": "attribute"}]}
+{"seq_id": "70909504555", "text": "from django import template\n\nregister = template.Library()\n\n@register.filter(name='dynamic_index')\ndef dynamic_index(list_instance, index):\n \"\"\"\n Accepts a list of weeks materials and return a specific index according to week_number value.\n \"\"\"\n try:\n return list_instance[index]\n except:\n return ''", "repo_name": "Ibrahem3amer/bala7", "sub_path": "cms/templatetags/admin_interface_extras.py", "file_name": "admin_interface_extras.py", "file_ext": "py", "file_size_in_byte": 328, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "73", "api": [{"api_name": "django.template.Library", "line_number": 3, "usage_type": "call"}, {"api_name": "django.template", "line_number": 3, "usage_type": "name"}]}
+{"seq_id": "29401672532", "text": "#Steuerung SBR-Anlage Kläranlage Altenahr MB2022-06\n#control program for the emergency & temporary wastewater treatment plant in Altenahr, \n#which was installed in April 2022 after the catastrophic flood from Juli 2021.\n#This temporary control program ran successfully by a Raspberry Pi Zero WH \n#connected to a LOW-Triggered relais board from May until July 2022.\n#With many thanks to Ludwig, Calvin, Kurt and Christoph for their hardware support, \n#the friendly commitment and that you tried it with me and this program!\n\nimport time\nimport tkinter\nimport threading\nfrom datetime import datetime, timedelta\nimport RPi.GPIO as GPIO\n\nmain = tkinter.Tk()\nmain.geometry('1000x700')\nmain.title('Steuerung SBR-Anlage Altenahr')\n\ndef ende():\n '''Close and End'''\n global hauptlauf, sbr1_lauf\n hauptlauf = \"AUS\"\n sbr1_lauf = \"AUS\"\n allout()\n GPIO.cleanup()\n main.destroy()\n\ndef tgesberechnen():\n '''calculate total time'''\n global t_d1, t_n1, t_d2, t_n2, t_sed, t_abzug, t_still, t_ges\n t_ges = t_d1 + t_n1 + t_d2 + t_n2 + t_sed + t_abzug + t_still\n ausgabetges[\"text\"] = str(t_ges)\n\ndef uebernehmen(phasenzeit, eingabevariable, ausgabevariable,\n fehlermeldungsvariable):\n '''check input'''\n try:\n ganzzahl = int(eingabevariable.get())\n if 0 <= ganzzahl < 1000:\n phasenzeit = ganzzahl\n ausgabevariable[\"text\"] = str(phasenzeit)\n else:\n t_11 = threading.Thread(target = fehlermeldung,\n args = (fehlermeldungsvariable,))\n t_11.start()\n except:\n t_11 = threading.Thread(target = fehlermeldung,\n args = (fehlermeldungsvariable,))\n t_11.start()\n return(phasenzeit)\n\ndef fehlermeldung(fehlermeldungsv):\n '''error message'''\n fehlermeldungsv[\"text\"] = \"Bitte eine ganze Zahl zwischen 0 und 999 eingeben\"\n time.sleep(3)\n fehlermeldungsv[\"text\"] = \"\"\n\ndef deni1get():\n '''get input for Deni1'''\n global t_d1\n t_d1 = uebernehmen(t_d1, eingabe1, ausgabedeni1, fehlerdeni1)\n tgesberechnen()\n eingabe1.delete(0, 'end')\n\ndef nitri1get():\n '''get input for Nitri1'''\n global t_n1\n t_n1 = uebernehmen(t_n1, eingabe2, ausgabenitri1, fehlernitri1)\n tgesberechnen()\n eingabe2.delete(0, 'end')\n\ndef deni2get():\n '''get input for Deni2'''\n global t_d2\n t_d2 = uebernehmen(t_d2, eingabe3, ausgabedeni2, fehlerdeni2)\n tgesberechnen()\n eingabe3.delete(0, 'end')\n\ndef nitri2get():\n '''get input for Nitri2'''\n global t_n2\n t_n2 = uebernehmen(t_n2, eingabe4, ausgabenitri2, fehlernitri2)\n tgesberechnen()\n eingabe4.delete(0, 'end')\n\ndef sedget():\n '''get input for Sedimentation'''\n global t_sed\n t_sed = uebernehmen(t_sed, eingabe5, ausgabesed, fehlersed)\n tgesberechnen()\n eingabe5.delete(0, 'end')\n\ndef klabzugget():\n '''get input for clarification'''\n global t_abzug\n t_abzug = uebernehmen(t_abzug, eingabe6, ausgabeklabzug, fehlerklabzug)\n tgesberechnen()\n eingabe6.delete(0, 'end')\n\ndef stillget():\n '''get input for waiting time'''\n global t_still\n t_still = uebernehmen(t_still, eingabe7, ausgabestillstand,\n fehlerstillstand)\n tgesberechnen()\n eingabe7.delete(0, 'end')\n\ndef GPIO_initialisieren():\n '''initialise GPIOs'''\n GPIO.setmode(GPIO.BCM)\n\n GPIO.setup(23, GPIO.OUT) #Zulaufpumpe\n GPIO.setup(22, GPIO.OUT) #Ruehrwerk\n GPIO.setup(25, GPIO.OUT) #Beluefter1\n GPIO.setup(24, GPIO.OUT) #Beluefter2\n\ndef allout():\n '''set out all GPIOs'''\n GPIO.output(23, GPIO.HIGH) #Ausschalten Zulaufpumpe\n GPIO.output(22, GPIO.HIGH) #Ausschalten Ruehrwerk\n GPIO.output(25, GPIO.HIGH) #Ausschalten Belüfter1\n 
GPIO.output(24, GPIO.HIGH) #Ausschalten Belüfter2\n\ndef countdownSBR1():\n '''calculate and show time until next phase starts'''\n global sbr1_auto, sbr1_phaseendezeit\n while sbr1_auto == \"AN\":\n while datetime.now() < sbr1_phaseendezeit:\n restzeit = str((sbr1_phaseendezeit - datetime.now()))\n SBR1Restzeitlabel['text'] = restzeit[0:restzeit.find('.')]\n time.sleep(1)\n SBR1Restzeitlabel['text'] = ''\n\ndef SBR1an():\n '''run SBR1'''\n global t_d1, t_n1, t_d2, t_n2, t_sed, t_abzug, t_still, sbr1_lauf, sbr1_phaseendezeit, sbr1_auto, sbr1_count\n sbr1_auto = \"AN\"\n t_5 = threading.Thread(target = countdownSBR1)\n t_5.start()\n while sbr1_lauf == \"AN\":\n #1. Deniphase 1: Ruehrwerk an\n GPIO.output(22, GPIO.LOW) #Einschalten Ruehrwerk\n sbr1phase[\"text\"] = \"Denitrifikation 1\"\n sbr1phasestart[\"text\"] = time.strftime(\"%H:%M:%S\",time.localtime())\n sbr1_phaseendezeit = datetime.now()+timedelta(minutes = t_d1)\n sbr1phaseende[\"text\"] = sbr1_phaseendezeit.strftime('%H:%M:%S')\n while datetime.now() < sbr1_phaseendezeit and sbr1_lauf == \"AN\":\n time.sleep(1)\n\n #2. Nitriphase 1: Beluefter an\n GPIO.output(25, GPIO.LOW) #Einschalten Belüfter1\n GPIO.output(24, GPIO.LOW) #Einschalten Belüfter2\n sbr1phase[\"text\"] = \"Nitrifikation 1\"\n sbr1phasestart[\"text\"] = time.strftime(\"%H:%M:%S\",time.localtime())\n sbr1_phaseendezeit = datetime.now()+timedelta(minutes = t_n1)\n sbr1phaseende[\"text\"] = sbr1_phaseendezeit.strftime('%H:%M:%S')\n while datetime.now() < sbr1_phaseendezeit and sbr1_lauf == \"AN\":\n time.sleep(1)\n\n #3. Deniphase 2: Beluefter aus\n GPIO.output(25, GPIO.HIGH) #Ausschalten Belüfter1\n GPIO.output(24, GPIO.HIGH) #Ausschalten Belüfter2\n sbr1phase[\"text\"] = \"Denitrifikation 2\"\n sbr1phasestart[\"text\"] = time.strftime(\"%H:%M:%S\",time.localtime())\n sbr1_phaseendezeit = datetime.now()+timedelta(minutes = t_d2)\n sbr1phaseende[\"text\"] = sbr1_phaseendezeit.strftime('%H:%M:%S')\n while datetime.now() < sbr1_phaseendezeit and sbr1_lauf == \"AN\":\n time.sleep(1)\n\n #4. Nitriphase 2: Beluefter an\n GPIO.output(25, GPIO.LOW) #Einschalten Belüfter1\n GPIO.output(24, GPIO.LOW) #Einschalten Belüfter2\n sbr1phase[\"text\"] = \"Nitrifikation 2\"\n sbr1phasestart[\"text\"] = time.strftime(\"%H:%M:%S\",time.localtime())\n sbr1_phaseendezeit = datetime.now()+timedelta(minutes = t_n2)\n sbr1phaseende[\"text\"] = sbr1_phaseendezeit.strftime('%H:%M:%S')\n while datetime.now() < sbr1_phaseendezeit and sbr1_lauf == \"AN\":\n time.sleep(1)\n\n #5. Sedimentations-/Absetzphase: Ruehrwerk und Belüfter aus\n GPIO.output(22, GPIO.HIGH) #Ausschalten Ruehrwerk\n GPIO.output(25, GPIO.HIGH) #Ausschalten Belüfter1\n GPIO.output(24, GPIO.HIGH) #Ausschalten Belüfter2\n sbr1phase[\"text\"] = \"Sedimentation\"\n sbr1phasestart[\"text\"] = time.strftime(\"%H:%M:%S\",time.localtime())\n sbr1_phaseendezeit = datetime.now()+timedelta(minutes = t_sed)\n sbr1phaseende[\"text\"] = sbr1_phaseendezeit.strftime('%H:%M:%S')\n while datetime.now() < sbr1_phaseendezeit and sbr1_lauf == \"AN\":\n time.sleep(1)\n\n #7. 
Klarwasserabzugs- und Zulaufphase\n GPIO.output(23, GPIO.LOW) #Einschalten Zulaufpumpe\n sbr1phase[\"text\"] = \"Klarwasserabzug, Zulauf\"\n sbr1phasestart[\"text\"] = time.strftime(\"%H:%M:%S\",time.localtime())\n sbr1_phaseendezeit = datetime.now()+timedelta(minutes = t_abzug)\n sbr1phaseende[\"text\"] = sbr1_phaseendezeit.strftime('%H:%M:%S')\n while datetime.now() < sbr1_phaseendezeit and sbr1_lauf == \"AN\":\n time.sleep(1)\n GPIO.output(23, GPIO.HIGH) #Ausschalten Zulaufpumpe\n\n #9. Stillstandszeit\n sbr1phase[\"text\"] = \"Stillstandszeit\"\n sbr1phasestart[\"text\"] = time.strftime(\"%H:%M:%S\",time.localtime())\n sbr1_phaseendezeit = datetime.now()+timedelta(minutes = t_still)\n sbr1phaseende[\"text\"] = sbr1_phaseendezeit.strftime('%H:%M:%S')\n while datetime.now() < sbr1_phaseendezeit and sbr1_lauf == \"AN\":\n time.sleep(1)\n\n if sbr1_lauf == \"AN\":\n sbr1_count += 1\n SBR1Durchganglabel[\"text\"] = str(sbr1_count)\n\n sbr1phase[\"text\"] = \"Pause\"\n sbr1_phaseendezeit = datetime.now()\n sbr1phaseende[\"text\"] = sbr1_phaseendezeit.strftime('%H:%M:%S')\n sbr1phasestart[\"text\"] = \"\"\n sbr1_auto = \"AUS\"\n\ndef schalten1():\n '''put SBR1 on or off'''\n global sbr1_lauf\n if sbr1_lauf == \"AUS\" and sbr1phase[\"text\"] == \"Pause\":\n sbr1_lauf = \"AN\"\n schalter1['bg'] = 'lime'\n schalter1['text'] = 'An'\n t_1 = threading.Thread(target = SBR1an)\n t_1.start()\n else:\n sbr1_lauf = \"AUS\"\n schalter1['bg'] = 'red'\n schalter1['text'] = 'Aus'\n\ndef zeitstempelaktualisieren():\n '''update time stamp'''\n global hauptlauf\n while hauptlauf == \"AN\":\n zeitjetzt['text'] = time.strftime(\"%H:%M:%S\",time.localtime())\n time.sleep(1)\n\ndef cputempaktualisieren():\n '''update temperatur for CPU'''\n global hauptlauf\n while hauptlauf == \"AN\":\n tempData = \"/sys/class/thermal/thermal_zone0/temp\"\n dateilesen = open(tempData, \"r\")\n temperatur = dateilesen.readline(5)\n dateilesen.close()\n temperatur = round(float(temperatur)/1000,1)\n cputemp['text'] = temperatur\n time.sleep(10)\n\n#Programm Start\nGPIO_initialisieren()\nallout()\nhauptlauf = \"AN\"\nsbr1_lauf = \"AN\"\nsbr1_auto = \"AUS\"\nt_d1 = 0#30 #Min\nt_n1 = 240#210 #Min\nt_d2 = 0 #Min\nt_n2 = 0 #Min\nt_sed = 30#60 #Min\nt_abzug = 5#60 #Min\nt_still = 15#30 Min\nt_ges = t_d1 + t_n1 + t_d2 + t_n2 + t_sed + t_abzug + t_still\nsbr1_count = 0\nsbr1_phaseendezeit = datetime.now()\n\n#Überschrift\ntkinter.Label(main, text = 'Kläranlage Altenahr ', font = ('arial', 11,'bold')\n ).place(x = 10, y = 10)\ntkinter.Label(main, text = 'SBR ', font = ('arial', 20, 'bold')\n ).place(x = 10, y = 50, anchor = 'w')\ntkinter.Label(main, text = '1', font = ('arial', 20, 'bold')\n ).place(x = 250, y = 50, anchor = 'center')\n\n#Schalter\nschalter1 = tkinter.Button(main, width = 8, text = 'Schalten',\n command = schalten1, cursor = 'tcross', bg = 'lime',\n font = ('arial', 10, 'bold'))\nschalter1.place(x = 250, y = 85, anchor = 'center')\n\n#Phase\ntkinter.Label(main, text = 'Aktuelle Phase:', font = ('arial', 11, 'bold')\n ).place(x = 10, y = 120, anchor = 'w')\nsbr1phase = tkinter.Label(main, text = 'Pause', font = ('arial', 11, 'bold'))\nsbr1phase.place(x = 250, y = 120, anchor = 'center')\n\ntkinter.Label(main, text = 'Start Phase:', font = ('arial', 11, 'bold')\n ).place(x = 10, y = 140, anchor = 'w')\nsbr1phasestart = tkinter.Label(main, text = '-', font = ('arial', 11, 'bold'))\nsbr1phasestart.place(x = 250, y = 140, anchor = 'center')\n\ntkinter.Label(main, text = 'Ende Phase:', font = ('arial', 11, 'bold')\n ).place(x = 
10, y = 160, anchor = 'w')\nsbr1phaseende = tkinter.Label(main, text = '-', font = ('arial', 11, 'bold'))\nsbr1phaseende.place(x = 250, y = 160, anchor = 'center')\n\ntkinter.Label(main, text = 'Restzeit:', font = ('arial', 11, 'bold')\n ).place(x = 10, y = 180, anchor = 'w')\nSBR1Restzeitlabel = tkinter.Label(main, text = '-',\n font = ('arial', 11, 'bold'))\nSBR1Restzeitlabel.place(x = 250, y = 180, anchor = 'center')\n\ntkinter.Label(main, text = 'Durchgang:', font = ('arial', 11, 'bold')\n ).place(x = 10, y = 200, anchor = 'w')\nSBR1Durchganglabel = tkinter.Label(main, text = str(sbr1_count),\n font = ('arial', 11, 'bold'))\nSBR1Durchganglabel.place(x = 250, y = 200, anchor = 'center')\n\n#Deni1\ntkinter.Label(main, text = 'Denitrifikation 1: ', font = ('arial', 11, 'bold')\n ).place(x = 150, y = 250, anchor = 'e')\nausgabedeni1 = tkinter.Label(main, text = str(t_d1),\n font = ('arial', 11, 'bold'))\nausgabedeni1.place(x = 245, y = 240, anchor = 'e')\ntkinter.Label(main, text = 'Min.', font = ('arial', 11, 'bold')\n ).place(x = 250, y = 240, anchor = 'w')\neingabe1 = tkinter.Entry(main, width = 5)\neingabe1.place(x = 245, y = 260, anchor = 'e')\neingabe1but = tkinter.Button(main, text = \"OK\", font = ('arial', 8, 'bold'),\n width = 1, command = deni1get)\neingabe1but.place(x = 250, y = 260, anchor = 'w')\nfehlerdeni1 = tkinter.Label(main, font = ('arial', 11, 'bold'), fg = 'red')\nfehlerdeni1.place(x = 160, y = 260, anchor = 'w')\n\n#Nitri1\ntkinter.Label(main, text = 'Nitrifikation 1: ', font = ('arial', 11, 'bold')\n ).place(x = 150, y = 310, anchor = 'e')\nausgabenitri1 = tkinter.Label(main, text = str(t_n1)\n , font = ('arial', 11, 'bold'))\nausgabenitri1.place(x = 245, y = 300, anchor = 'e')\ntkinter.Label(main, text = 'Min.', font = ('arial', 11, 'bold')\n ).place(x = 250, y = 300, anchor = 'w')\neingabe2 = tkinter.Entry(main, width = 5)\neingabe2.place(x = 245, y = 320, anchor = 'e')\neingabe2but = tkinter.Button(main, text = \"OK\", font = ('arial', 8, 'bold'),\n width = 1, command = nitri1get)\neingabe2but.place(x = 250, y = 320, anchor = 'w')\nfehlernitri1 = tkinter.Label(main, font = ('arial', 11, 'bold'), fg = 'red')\nfehlernitri1.place(x = 160, y = 320, anchor = 'w')\n\n#Deni2\ntkinter.Label(main, text = 'Denitrifikation 2: ', font = ('arial', 11, 'bold')\n ).place(x = 150, y = 370, anchor = 'e')\nausgabedeni2 = tkinter.Label(main, text = str(t_d2),\n font = ('arial', 11, 'bold'))\nausgabedeni2.place(x = 245, y = 360, anchor = 'e')\ntkinter.Label(main, text = 'Min.', font = ('arial', 11, 'bold')\n ).place(x = 250, y = 360, anchor = 'w')\neingabe3 = tkinter.Entry(main, width = 5)\neingabe3.place(x = 245, y = 380, anchor = 'e')\neingabe3but = tkinter.Button(main, text = \"OK\", font = ('arial', 8, 'bold'),\n width = 1, command = deni2get)\neingabe3but.place(x = 250, y = 380, anchor = 'w')\nfehlerdeni2 = tkinter.Label(main, font = ('arial', 11, 'bold'), fg = 'red')\nfehlerdeni2.place(x = 160, y = 380, anchor = 'w')\n\n#Nitri2\ntkinter.Label(main, text = 'Nitrifikation 2: ', font = ('arial', 11, 'bold')\n ).place(x = 150, y = 430, anchor = 'e')\nausgabenitri2 = tkinter.Label(main, text = str(t_n2),\n font = ('arial', 11, 'bold'))\nausgabenitri2.place(x = 245, y = 420, anchor = 'e')\ntkinter.Label(main, text = 'Min.', font = ('arial', 11, 'bold')\n ).place(x = 250, y = 420, anchor = 'w')\neingabe4 = tkinter.Entry(main, width = 5)\neingabe4.place(x = 245, y = 440, anchor = 'e')\neingabe4but = tkinter.Button(main, text = \"OK\", font = ('arial', 8, 'bold'),\n width = 1, command 
= nitri2get)\neingabe4but.place(x = 250, y = 440, anchor = 'w')\nfehlernitri2 = tkinter.Label(main, font = ('arial', 11, 'bold'), fg = 'red')\nfehlernitri2.place(x = 160, y = 440, anchor = 'w')\n\n#settling/sedimentation\ntkinter.Label(main, text = 'Sedimentation: ', font = ('arial', 11, 'bold')\n              ).place(x = 150, y = 500, anchor = 'e')\nausgabesed = tkinter.Label(main, text = str(t_sed), font = ('arial', 11,\n                           'bold'))\nausgabesed.place(x = 245, y = 490, anchor = 'e')\ntkinter.Label(main, text = 'Min.', font = ('arial', 11, 'bold')\n              ).place(x = 250, y = 490, anchor = 'w')\neingabe5 = tkinter.Entry(main, width = 5)\neingabe5.place(x = 245, y = 510, anchor = 'e')\neingabe5but = tkinter.Button(main, text = \"OK\", font = ('arial', 8, 'bold'),\n                             width = 1, command = sedget)\neingabe5but.place(x = 250, y = 510, anchor = 'w')\nfehlersed = tkinter.Label(main, font = ('arial', 11, 'bold'), fg = 'red')\nfehlersed.place(x = 160, y = 510, anchor = 'w')\n\n#clear water withdrawal, inflow\ntkinter.Label(main, text = 'Klarwasserabzug: ', font = ('arial', 11, 'bold')\n              ).place(x = 150, y = 560, anchor = 'e')\nausgabeklabzug = tkinter.Label(main, text = str(t_abzug),\n                               font = ('arial', 11, 'bold'))\nausgabeklabzug.place(x = 245, y = 550, anchor = 'e')\ntkinter.Label(main, text = 'Min.', font = ('arial', 11, 'bold')\n              ).place(x = 250, y = 550, anchor = 'w')\neingabe6 = tkinter.Entry(main, width = 5)\neingabe6.place(x = 245, y = 570, anchor = 'e')\neingabe6but = tkinter.Button(main, text = \"OK\", font = ('arial', 8, 'bold'),\n                             width = 1, command = klabzugget)\neingabe6but.place(x = 250, y = 570, anchor = 'w')\nfehlerklabzug = tkinter.Label(main, font = ('arial', 11, 'bold'), fg = 'red')\nfehlerklabzug.place(x = 160, y = 570, anchor = 'w')\n\n#idle period\ntkinter.Label(main, text = 'Stillstandszeit: ', font = ('arial', 11, 'bold')\n              ).place(x = 150, y = 620, anchor = 'e')\nausgabestillstand = tkinter.Label(main, text = str(t_still),\n                                  font = ('arial', 11, 'bold'))\nausgabestillstand.place(x = 245, y = 610, anchor = 'e')\ntkinter.Label(main, text = 'Min.', font = ('arial', 11, 'bold')\n              ).place(x = 250, y = 610, anchor = 'w')\neingabe7 = tkinter.Entry(main, width = 5)\neingabe7.place(x = 245, y = 630, anchor = 'e')\neingabe7but = tkinter.Button(main, text = \"OK\", font = ('arial', 8, 'bold'),\n                             width = 1, command = stillget)\neingabe7but.place(x = 250, y = 630, anchor = 'w')\nfehlerstillstand = tkinter.Label(main, font = ('arial', 11, 'bold'), fg = 'red')\nfehlerstillstand.place(x = 160, y = 630, anchor = 'w')\n\n#cycle/total time\ntkinter.Label(main, text = 'Zykluszeit: ', font = ('arial', 11, 'bold')\n              ).place(x = 150, y = 670, anchor = 'e')\nausgabetges = tkinter.Label(main, text = str(t_ges), font = ('arial', 11, 'bold'))\nausgabetges.place(x = 245, y = 670, anchor = 'e')\ntkinter.Label(main, text = 'Min.', font = ('arial', 11, 'bold')\n              ).place(x = 250, y = 670, anchor = 'w')\n\n#display and update the timestamp\nzeitjetzt = tkinter.Label(main, text = '-', font = ('arial', 11, 'bold'))\nzeitjetzt.place(x = 625, y = 50, anchor = 'w')\nthreading.Thread(target = zeitstempelaktualisieren).start()\n\n#start the SBR control when the program starts\nthreading.Thread(target = SBR1an).start()\n\n#display and update the CPU temperature\ntkinter.Label(main, text = 'CPU-Temp.: ', font = ('arial', 11, 'bold')\n              ).place (x = 625, y = 70, anchor = 'w')\ntkinter.Label(main, text = '°C', font = ('arial', 11, 'bold')\n              ).place (x = 750, y = 70, anchor = 'w')\ncputemp = tkinter.Label(main, text = '-', font = 
('arial', 11, 'bold'))\ncputemp.place(x = 750, y = 70, anchor = 'e')\nthreading.Thread(target = cputempaktualisieren).start()\n\n#place the quit button\nendeschalt = tkinter.Button(main, text = 'Beenden', command = ende,\n                            cursor = 'tcross', bg= 'white',\n                            font = ('arial', 11, 'bold'))\nendeschalt.place(x = 625, y = 20, anchor = 'w')\n\n# also end the program when the window is closed\nmain.protocol(\"WM_DELETE_WINDOW\", ende)\n\n#loop\nmain.mainloop()\n", "repo_name": "markusbombeck/wastewatertreatment", "sub_path": "sbr_altenahr.py", "file_name": "sbr_altenahr.py", "file_ext": "py", "file_size_in_byte": 19136, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "83", "api": [{"api_name": "tkinter.Tk", "line_number": 15, "usage_type": "call"}, {"api_name": "RPi.GPIO.cleanup", "line_number": 25, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 25, "usage_type": "name"}, {"api_name": "threading.Thread", "line_number": 43, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 47, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 55, "usage_type": "call"}, {"api_name": "RPi.GPIO.setmode", "line_number": 110, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 110, "usage_type": "name"}, {"api_name": "RPi.GPIO.BCM", "line_number": 110, "usage_type": "attribute"}, {"api_name": "RPi.GPIO.setup", "line_number": 112, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 112, "usage_type": "name"}, {"api_name": "RPi.GPIO.OUT", "line_number": 112, "usage_type": "attribute"}, {"api_name": "RPi.GPIO.setup", "line_number": 113, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 113, "usage_type": "name"}, {"api_name": "RPi.GPIO.OUT", "line_number": 113, "usage_type": "attribute"}, {"api_name": "RPi.GPIO.setup", "line_number": 114, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 114, "usage_type": "name"}, {"api_name": "RPi.GPIO.OUT", "line_number": 114, "usage_type": "attribute"}, {"api_name": "RPi.GPIO.setup", "line_number": 115, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 115, "usage_type": "name"}, {"api_name": "RPi.GPIO.OUT", "line_number": 115, "usage_type": "attribute"}, {"api_name": "RPi.GPIO.output", "line_number": 119, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 119, "usage_type": "name"}, {"api_name": "RPi.GPIO.HIGH", "line_number": 119, "usage_type": "attribute"}, {"api_name": "RPi.GPIO.output", "line_number": 120, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 120, "usage_type": "name"}, {"api_name": "RPi.GPIO.HIGH", "line_number": 120, "usage_type": "attribute"}, {"api_name": "RPi.GPIO.output", "line_number": 121, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 121, "usage_type": "name"}, {"api_name": "RPi.GPIO.HIGH", "line_number": 121, "usage_type": "attribute"}, {"api_name": "RPi.GPIO.output", "line_number": 122, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 122, "usage_type": "name"}, {"api_name": "RPi.GPIO.HIGH", "line_number": 122, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 128, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 128, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 129, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 129, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 131, "usage_type": "call"}, 
{"api_name": "threading.Thread", "line_number": 138, "usage_type": "call"}, {"api_name": "RPi.GPIO.output", "line_number": 142, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 142, "usage_type": "name"}, {"api_name": "RPi.GPIO.LOW", "line_number": 142, "usage_type": "attribute"}, {"api_name": "time.strftime", "line_number": 144, "usage_type": "call"}, {"api_name": "time.localtime", "line_number": 144, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 145, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 145, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 145, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 147, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 147, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 148, "usage_type": "call"}, {"api_name": "RPi.GPIO.output", "line_number": 151, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 151, "usage_type": "name"}, {"api_name": "RPi.GPIO.LOW", "line_number": 151, "usage_type": "attribute"}, {"api_name": "RPi.GPIO.output", "line_number": 152, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 152, "usage_type": "name"}, {"api_name": "RPi.GPIO.LOW", "line_number": 152, "usage_type": "attribute"}, {"api_name": "time.strftime", "line_number": 154, "usage_type": "call"}, {"api_name": "time.localtime", "line_number": 154, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 155, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 155, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 155, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 157, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 157, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 158, "usage_type": "call"}, {"api_name": "RPi.GPIO.output", "line_number": 161, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 161, "usage_type": "name"}, {"api_name": "RPi.GPIO.HIGH", "line_number": 161, "usage_type": "attribute"}, {"api_name": "RPi.GPIO.output", "line_number": 162, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 162, "usage_type": "name"}, {"api_name": "RPi.GPIO.HIGH", "line_number": 162, "usage_type": "attribute"}, {"api_name": "time.strftime", "line_number": 164, "usage_type": "call"}, {"api_name": "time.localtime", "line_number": 164, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 165, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 165, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 165, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 167, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 167, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 168, "usage_type": "call"}, {"api_name": "RPi.GPIO.output", "line_number": 171, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 171, "usage_type": "name"}, {"api_name": "RPi.GPIO.LOW", "line_number": 171, "usage_type": "attribute"}, {"api_name": "RPi.GPIO.output", "line_number": 172, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 172, "usage_type": "name"}, {"api_name": "RPi.GPIO.LOW", "line_number": 172, "usage_type": "attribute"}, {"api_name": "time.strftime", "line_number": 174, "usage_type": "call"}, {"api_name": "time.localtime", 
"line_number": 174, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 175, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 175, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 175, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 177, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 177, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 178, "usage_type": "call"}, {"api_name": "RPi.GPIO.output", "line_number": 181, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 181, "usage_type": "name"}, {"api_name": "RPi.GPIO.HIGH", "line_number": 181, "usage_type": "attribute"}, {"api_name": "RPi.GPIO.output", "line_number": 182, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 182, "usage_type": "name"}, {"api_name": "RPi.GPIO.HIGH", "line_number": 182, "usage_type": "attribute"}, {"api_name": "RPi.GPIO.output", "line_number": 183, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 183, "usage_type": "name"}, {"api_name": "RPi.GPIO.HIGH", "line_number": 183, "usage_type": "attribute"}, {"api_name": "time.strftime", "line_number": 185, "usage_type": "call"}, {"api_name": "time.localtime", "line_number": 185, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 186, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 186, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 186, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 188, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 188, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 189, "usage_type": "call"}, {"api_name": "RPi.GPIO.output", "line_number": 192, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 192, "usage_type": "name"}, {"api_name": "RPi.GPIO.LOW", "line_number": 192, "usage_type": "attribute"}, {"api_name": "time.strftime", "line_number": 194, "usage_type": "call"}, {"api_name": "time.localtime", "line_number": 194, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 195, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 195, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 195, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 197, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 197, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 198, "usage_type": "call"}, {"api_name": "RPi.GPIO.output", "line_number": 199, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 199, "usage_type": "name"}, {"api_name": "RPi.GPIO.HIGH", "line_number": 199, "usage_type": "attribute"}, {"api_name": "time.strftime", "line_number": 203, "usage_type": "call"}, {"api_name": "time.localtime", "line_number": 203, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 204, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 204, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 204, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 206, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 206, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 207, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 214, "usage_type": "call"}, {"api_name": 
"datetime.datetime", "line_number": 214, "usage_type": "name"}, {"api_name": "threading.Thread", "line_number": 226, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 237, "usage_type": "call"}, {"api_name": "time.localtime", "line_number": 237, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 238, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 250, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 267, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 267, "usage_type": "name"}, {"api_name": "tkinter.Label", "line_number": 270, "usage_type": "call"}, {"api_name": "tkinter.Label", "line_number": 272, "usage_type": "call"}, {"api_name": "tkinter.Label", "line_number": 274, "usage_type": "call"}, {"api_name": "tkinter.Button", "line_number": 278, "usage_type": "call"}, {"api_name": "tkinter.Label", "line_number": 284, "usage_type": "call"}, {"api_name": "tkinter.Label", "line_number": 286, "usage_type": "call"}, {"api_name": "tkinter.Label", "line_number": 289, "usage_type": "call"}, {"api_name": "tkinter.Label", "line_number": 291, "usage_type": "call"}, {"api_name": "tkinter.Label", "line_number": 294, "usage_type": "call"}, {"api_name": "tkinter.Label", "line_number": 296, "usage_type": "call"}, {"api_name": "tkinter.Label", "line_number": 299, "usage_type": "call"}, {"api_name": "tkinter.Label", "line_number": 301, "usage_type": "call"}, {"api_name": "tkinter.Label", "line_number": 305, "usage_type": "call"}, {"api_name": "tkinter.Label", "line_number": 307, "usage_type": "call"}, {"api_name": "tkinter.Label", "line_number": 312, "usage_type": "call"}, {"api_name": "tkinter.Label", "line_number": 314, "usage_type": "call"}, {"api_name": "tkinter.Label", "line_number": 317, "usage_type": "call"}, {"api_name": "tkinter.Entry", "line_number": 319, "usage_type": "call"}, {"api_name": "tkinter.Button", "line_number": 321, "usage_type": "call"}, {"api_name": "tkinter.Label", "line_number": 324, "usage_type": "call"}, {"api_name": "tkinter.Label", "line_number": 328, "usage_type": "call"}, {"api_name": "tkinter.Label", "line_number": 330, "usage_type": "call"}, {"api_name": "tkinter.Label", "line_number": 333, "usage_type": "call"}, {"api_name": "tkinter.Entry", "line_number": 335, "usage_type": "call"}, {"api_name": "tkinter.Button", "line_number": 337, "usage_type": "call"}, {"api_name": "tkinter.Label", "line_number": 340, "usage_type": "call"}, {"api_name": "tkinter.Label", "line_number": 344, "usage_type": "call"}, {"api_name": "tkinter.Label", "line_number": 346, "usage_type": "call"}, {"api_name": "tkinter.Label", "line_number": 349, "usage_type": "call"}, {"api_name": "tkinter.Entry", "line_number": 351, "usage_type": "call"}, {"api_name": "tkinter.Button", "line_number": 353, "usage_type": "call"}, {"api_name": "tkinter.Label", "line_number": 356, "usage_type": "call"}, {"api_name": "tkinter.Label", "line_number": 360, "usage_type": "call"}, {"api_name": "tkinter.Label", "line_number": 362, "usage_type": "call"}, {"api_name": "tkinter.Label", "line_number": 365, "usage_type": "call"}, {"api_name": "tkinter.Entry", "line_number": 367, "usage_type": "call"}, {"api_name": "tkinter.Button", "line_number": 369, "usage_type": "call"}, {"api_name": "tkinter.Label", "line_number": 372, "usage_type": "call"}, {"api_name": "tkinter.Label", "line_number": 376, "usage_type": "call"}, {"api_name": "tkinter.Label", "line_number": 378, "usage_type": "call"}, {"api_name": "tkinter.Label", "line_number": 
381, "usage_type": "call"}, {"api_name": "tkinter.Entry", "line_number": 383, "usage_type": "call"}, {"api_name": "tkinter.Button", "line_number": 385, "usage_type": "call"}, {"api_name": "tkinter.Label", "line_number": 388, "usage_type": "call"}, {"api_name": "tkinter.Label", "line_number": 392, "usage_type": "call"}, {"api_name": "tkinter.Label", "line_number": 394, "usage_type": "call"}, {"api_name": "tkinter.Label", "line_number": 397, "usage_type": "call"}, {"api_name": "tkinter.Entry", "line_number": 399, "usage_type": "call"}, {"api_name": "tkinter.Button", "line_number": 401, "usage_type": "call"}, {"api_name": "tkinter.Label", "line_number": 404, "usage_type": "call"}, {"api_name": "tkinter.Label", "line_number": 408, "usage_type": "call"}, {"api_name": "tkinter.Label", "line_number": 410, "usage_type": "call"}, {"api_name": "tkinter.Label", "line_number": 413, "usage_type": "call"}, {"api_name": "tkinter.Entry", "line_number": 415, "usage_type": "call"}, {"api_name": "tkinter.Button", "line_number": 417, "usage_type": "call"}, {"api_name": "tkinter.Label", "line_number": 420, "usage_type": "call"}, {"api_name": "tkinter.Label", "line_number": 424, "usage_type": "call"}, {"api_name": "tkinter.Label", "line_number": 426, "usage_type": "call"}, {"api_name": "tkinter.Label", "line_number": 428, "usage_type": "call"}, {"api_name": "tkinter.Label", "line_number": 432, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 434, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 437, "usage_type": "call"}, {"api_name": "tkinter.Label", "line_number": 440, "usage_type": "call"}, {"api_name": "tkinter.Label", "line_number": 442, "usage_type": "call"}, {"api_name": "tkinter.Label", "line_number": 444, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 446, "usage_type": "call"}, {"api_name": "tkinter.Button", "line_number": 449, "usage_type": "call"}]}
+{"seq_id": "26856437298", "text": "from cloudvolume import CloudVolume, Storage\nfrom shapely.geometry import Polygon, box\nimport numpy as np\nfrom io import BytesIO\nfrom PIL import Image, ImageDraw\n\nmip = 6\nmask = Storage('gs://neuroglancer/pinky100_v0/edge_mask')\norder = Storage('gs://neuroglancer/pinky100_v0/z_order_corrected')\nout = CloudVolume('gs://neuroglancer/pinky100_v0/image_single_slices/roi', \n\t\t\t\t\t\t\t\t\t\t\t\t\tcdn_cache=True, mip=mip)\n\n# Get bounding box of a total slice\noffset = out.voxel_offset\nsize = tuple(out.shape[:2])\nx_slice = slice(offset[0], offset[0]+size[0])\ny_slice = slice(offset[1], offset[1]+size[1])\n\n# Build z remap dict\nf = order.get_file('dst_to_src.csv')\norder_arr = np.genfromtxt(BytesIO(f), dtype=np.int, \n\t\t\t\t\t\t\t\t\t\t\tdelimiter=',', skip_header=1)\ndst_to_src = {order_arr[i,0]:order_arr[i,1] \n\t\t\t\t\t\t\t\t\tfor i in range(order_arr.shape[0])}\n\n# Compile all ROI mask polygons (indexed by src_z)\n# ROI is translated by offset\nsrc_z_range = dst_to_src.values()\nsrc_z_filenames = list(map(str, src_z_range))\nmask_files = mask.get_files(src_z_filenames)\nmask_polygons = {}\nfor f in mask_files:\n\tif f['content'] is not None:\n\t\tpts = np.genfromtxt(BytesIO(f['content']), \n\t\t\t\t\t\t\t\tdtype=np.float, delimiter=',')\n\t\tpoly = Polygon(map(tuple, pts[:,:2] / 2**mip))\n\t\tpts = [(int(round(a[0]))-offset[0], int(round(a[1]))-offset[1]) for a \n\t\t\t\t\t\t\t\t\t\t\t\tin list(poly.exterior.coords)]\n\t\tmask_polygons[int(f['filename'])] = pts\n\n# Create mask image for each dst_z\ndst_z_range = dst_to_src.keys()\nfor dst_z in dst_z_range:\n\tsrc_z = dst_to_src[dst_z]\n\tpts = mask_polygons[src_z]\n\timg_mask = Image.new('1', size, 0)\n\tImageDraw.Draw(img_mask).polygon(pts, outline=1, fill=1)\n\timg_mask = np.transpose(np.array(img_mask)).astype(np.uint8)\n\tout[x_slice, y_slice, dst_z] = np.reshape(img_mask, img_mask.shape+(1,))\n", "repo_name": "seung-lab/Alembic", "sub_path": "src/tasks/python/preprocessing/ingest_masks.py", "file_name": "ingest_masks.py", "file_ext": "py", "file_size_in_byte": 1784, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 10, "dataset": "github-code", "pt": "73", "api": [{"api_name": "cloudvolume.Storage", "line_number": 8, "usage_type": "call"}, {"api_name": "cloudvolume.Storage", "line_number": 9, "usage_type": "call"}, {"api_name": "cloudvolume.CloudVolume", "line_number": 10, "usage_type": "call"}, {"api_name": "numpy.genfromtxt", "line_number": 21, "usage_type": "call"}, {"api_name": "io.BytesIO", "line_number": 21, "usage_type": "call"}, {"api_name": "numpy.int", "line_number": 21, "usage_type": "attribute"}, {"api_name": "numpy.genfromtxt", "line_number": 34, "usage_type": "call"}, {"api_name": "io.BytesIO", "line_number": 34, "usage_type": "call"}, {"api_name": "numpy.float", "line_number": 35, "usage_type": "attribute"}, {"api_name": "shapely.geometry.Polygon", "line_number": 36, "usage_type": "call"}, {"api_name": "PIL.Image.new", "line_number": 46, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 46, "usage_type": "name"}, {"api_name": "PIL.ImageDraw.Draw", "line_number": 47, "usage_type": "call"}, {"api_name": "PIL.ImageDraw", "line_number": 47, "usage_type": "name"}, {"api_name": "numpy.transpose", "line_number": 48, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 48, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 48, "usage_type": "attribute"}, {"api_name": "numpy.reshape", "line_number": 49, 
"usage_type": "call"}]}
+{"seq_id": "31862844722", "text": "import os\nimport pickle\nimport numpy as np\nimport pandas as pd\nfrom datetime import datetime as dt\nfrom tqdm import tqdm\nimport sys\n##needed for debugging\nsys.path.append('.')\nfrom Database.db_api import db_api\n\n\ndef renaming_fun(x):\n #this fuction renames columns in df to better match db for upload later\n name_dict = {\n 'p_name':'name',\n 'M':'meters_made',\n 'C':'carries',\n 'P':'passes_made',\n 'T':'tackles_made',\n 'MT':'missed_tackles',\n 'TW':'turnovers_won',\n 'TC':'turnovers_conceded',\n 'DB':'defenders_beaten',\n 'TA':'try_assists',\n 'O':'offloads',\n 'CB':'clean_breaks',\n 'LW':'lineouts_won',\n 'LS':'lineouts_stolen',\n }\n try:\n return name_dict[x]\n except KeyError:\n return x\n\ndef check_for_duplicates(master_dict):\n#duplicates from scraping can only appear sequentially\n for i in range(1,158):\n if isinstance(master_dict[i], str):\n print(i, master_dict[i], sep=': ')\n continue\n if isinstance(master_dict[i-1], str):\n continue\n\n if master_dict[i]['FT_Score']==master_dict[i-1]['FT_Score']:\n print(i, 'Possible Dupe', sep=': ')\n\ndef extract_sub_data(Pos_Player_Array):\n #https://www.premiershiprugby.com/match-report/match-report-leicester-tigers-34-19-exeter-chiefs#report\n #weird inconsistancy in subbing on site double entries.\n #so we will always take last entry as those line up correctly\n #possible given issue where player will be subbed and not captured on site\n sub_flag = False\n p_name = []\n pos_num = []\n min_played = []\n is_sub = []\n for pos, player in Pos_Player_Array:\n #check if we are in replacements section\n if player == 'Replacements':\n sub_flag = True\n p_name.append(player)\n min_played.append(-1)\n is_sub.append(-1)\n pos_num.append(-1)\n continue\n\n subs = player.split(' ')\n\n #check if there are any subs\n if len(subs) == 1: #no subs\n p_name.append(subs[0])\n min_played.append(80)\n\n #issue where some subs do not have replacements data on site\n #Note on site they will populate sub data with players not subbed\n # when there is no sub. 
These players will be caught here and removed\n # by clean_df at end\n if sub_flag:\n #this -1 signifies not subbed in player to be removed\n is_sub.append(-1)\n pos_num.append(-1)\n continue\n\n is_sub.append(0)\n pos_num.append(int(pos))\n continue\n\n if sub_flag:\n p_name.append(subs[0])\n min_played.append(80 - int(subs[-1].strip(\"'\")))\n is_sub.append(1)\n #find position of sub\n try:\n pos_num.append(pos_num[p_name.index(subs[-2])])\n except: #in the case of a resub\n pos_num.append([i for i,item in enumerate(Pos_Player_Array) if subs[-2] in item[1]][0])\n continue\n\n p_name.append(subs[0])\n #second split here since website has some small inconcistancies in formatting\n min_played.append(int(subs[-1].split(' ')[-1].strip(\"'\")))\n is_sub.append(0)\n pos_num.append(int(pos))\n\n return(p_name, min_played, is_sub, pos_num)\n\ndef lookup_PlayGuid_list(p_names_df, list_ids):\n p_names, player_ids = zip(*list_ids)\n PlayGuids = []\n for p_name in p_names_df:\n try:\n PlayGuid_link = player_ids[p_names.index(p_name)]\n PlayGuid = PlayGuid_link.split('=')[1]\n except ValueError: #take player from relitave positon when lookup fails\n PlayGuid_link = player_ids[np.where(p_names_df==p_name)[0][0]]\n PlayGuid = PlayGuid_link.split('=')[1]\n\n PlayGuids.append(PlayGuid)\n return(PlayGuids)\n\ndef create_dict_special(special_list, at_home):\n headers = special_list[2]\n side_details = special_list[at_home]\n dict_lst = {h: d for h, d in zip(headers, side_details)}\n return dict_lst\n\ndef add_target_values(dict_key, dict_lst, df_col, df):\n #dictonary of known player miss spelling\n spelling = { 'Sam Lewis': 'Samuel Lewis',\n 'Melani Nanai Vai': 'Melani Nanai',\n 'Rus Tuima': 'Rusiate Tuima',\n 'Matty Proctor': 'Matt Proctor',\n 'Dan Thomas': 'Daniel Thomas',\n 'Dan du Preez': 'Daniel du Preez',\n 'Dom Morris': 'Dominic Morris',\n 'Jamie Shillcock': 'James Shillcock',\n 'Theo McFarland': 'Theodore McFarland',\n 'Matt Cornish': 'Matthew Cornish',\n 'Val Rapava Ruskin': 'Val Rapava-Ruskin',\n 'Seb Atkinson': 'Sebastien Atkinson',\n 'Elliott Obatoyinbo': 'Elliot Obatoyinbo',\n 'Semi Radradra Waqavatu': 'Semi Radradra',\n 'Ash Challenger': 'Ashley Challenger'\n }\n #penos\n try:\n players = [[p.split(',')[0], len(p.split(','))-1]for p in dict_lst[dict_key]]\n\n for p in players:\n #player name not recorded \n #TYPO HERE MAY NEED TO TEST\n if len(p[0])<=3:\n continue\n\n #player name not found\n if p[0] not in df['name'].values:\n try:\n df.loc[df['name'] == spelling[p[0]], df_col] = p[1]\n except:\n print('Cannot find player {0}'.format(p[0]))\n else:\n df.loc[df['name'] == p[0], df_col] = p[1]\n except:\n #if no penalty goals in game\n pass\n\ndef clean_df(df, list_ids, at_home, target_details):\n df['at_home'] = at_home\n\n #determine minutes played and subs and add to dataframe\n p_name, min_played, is_sub, pos_num = extract_sub_data(df[['Pos','Player']].values)\n df['name'] = p_name\n df['mins_played'] = min_played\n df['is_sub'] = is_sub\n df['position_num'] = pos_num \n\n #remove replacements column\n df.drop([p_name.index('Replacements')], inplace=True)\n\n #add in player ids\n df['playguid'] = lookup_PlayGuid_list(df['name'].values, list_ids)\n\n #drop reformatted columns\n df.drop(['Pos', 'Player'], axis=1, inplace=True)\n\n #fill in 0s\n df = df.replace('-',0)\n\n #rename columns for database\n df.columns = map(renaming_fun, df.columns)\n\n #add target detaild for penos, tries and convos\n dict_lst = create_dict_special(target_details, at_home)\n\n df['penalty_goals'] = 0\n 
add_target_values('Penalties',dict_lst, 'penalty_goals', df)\n\n df['tries'] = 0\n add_target_values('Tries',dict_lst, 'tries', df)\n #add_target_values('Penalty Tries',dict_lst, 'tries', df)\n\n df['conversions'] = 0\n add_target_values('Conversions',dict_lst, 'conversions', df)\n\n #remove players who did not play or we dont know for how long\n df = df[df['is_sub'] >= 0]\n\n #reset index\n df = df.reset_index(drop=True)\n return(df)\n\ndef load_to_db(year, loaded_dict):\n #connect to db\n db_tool = db_api()\n\n #legacy\n list_of_dfs = []\n\n #create compitition record\n table = 'Comps' \n insert_dict = {'name': 'Premiership',\n 'year': int(year)}\n comp = db_tool.insert(table, **insert_dict)\n\n for i in tqdm(range(0,len(loaded_dict))):\n #skip empty game data\n if isinstance(loaded_dict[i], str):\n print(i, loaded_dict[i], sep=': ')\n continue\n\n #Match Details\n match_dict = dict((k, loaded_dict[i][k]) for k in ('match_date', 'home_team', 'away_team', 'FT_Score', 'HT_Score'))\n table = 'Matches' \n p_table = 'Players'\n pm_table = 'Player_Matches'\n insert_dict = {'idComp': comp[0]['idComp'],\n 'date': dt.strptime(match_dict['match_date'], '%A %d %B %Y'),\n 'home': match_dict['home_team'],\n 'away': match_dict['away_team'],\n 'FT_Score': match_dict['FT_Score'],\n 'HT_Score': match_dict['HT_Score'],\n }\n match = db_tool.insert(table, **insert_dict)\n\n #home\n df1 = clean_df(loaded_dict[i]['home_df'], loaded_dict[i]['home_player_ids'],\n 1, loaded_dict[i]['target_details'])\n\n player_dict_list = df1[['playguid','name']].T.to_dict()\n player_match_dict_list = df1.drop(['name', 'playguid'], axis=1).T.to_dict()\n\n for j in range(0,len(player_dict_list)):\n player = db_tool.insert(p_table, **player_dict_list[j])\n #player match id\n id_dict = { 'idPlayer': player[0]['idPlayer'] , 'idMatch': match[0]['idMatch'] }\n insert_dict = {**id_dict, **player_match_dict_list[j]}\n db_tool.insert(pm_table, **insert_dict)\n \n #legacy\n list_of_dfs.append(df1)\n\n #away\n df2 = clean_df(loaded_dict[i]['away_df'], loaded_dict[i]['away_player_ids'], \n 0, loaded_dict[i]['target_details'])\n\n player_dict_list = df2[['playguid','name']].T.to_dict()\n player_match_dict_list = df2.drop(['name','playguid'], axis=1).T.to_dict()\n\n for j in range(0,len(player_dict_list)):\n player = db_tool.insert(p_table, **player_dict_list[j])\n #player match id\n id_dict = { 'idPlayer': player[0]['idPlayer'] , 'idMatch': match[0]['idMatch'] }\n insert_dict = {**id_dict, **player_match_dict_list[j]}\n db_tool.insert(pm_table, **insert_dict)\n\n #legacy\n list_of_dfs.append(df2)\n\n #legacy\n master_df = pd.concat(list_of_dfs)\n master_df.reset_index(inplace = True, drop=True)\n\n return master_df\n\n\npickle_path = os.getcwd() + '\\\\Scrapers\\\\Scraped Data\\\\premiership_matches.pkl'\nwith open(pickle_path, 'rb') as f:\n loaded_dict = pickle.load(f)\n\nmaster_df = load_to_db(2022, loaded_dict)\n\nsave_path = os.getcwd() + '\\\\Data Cleaners\\\\Cleaner_Data\\\\premiership_matches.csv'\n\nmaster_df.to_csv(save_path)\n\nloaded_dict[1]['home_df'].columns\n\n", "repo_name": "Aidzillafont/Rugby-Recruitment", "sub_path": "Data Cleaners/premiership_games_cleaner.py", "file_name": "premiership_games_cleaner.py", "file_ext": "py", "file_size_in_byte": 10143, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "73", "api": [{"api_name": "sys.path.append", "line_number": 9, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 9, "usage_type": "attribute"}, {"api_name": 
"numpy.where", "line_number": 116, "usage_type": "call"}, {"api_name": "Database.db_api.db_api", "line_number": 215, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 226, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 238, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 238, "usage_type": "name"}, {"api_name": "pandas.concat", "line_number": 281, "usage_type": "call"}, {"api_name": "os.getcwd", "line_number": 287, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 289, "usage_type": "call"}, {"api_name": "os.getcwd", "line_number": 293, "usage_type": "call"}]}
+{"seq_id": "9778380578", "text": "import sys, os, yaml, re, json\nimport numpy as np, torch as th\nfrom lib import reorder\nimport lib\nimport argparse, collections\nfrom termcolor import cprint, colored\nimport statistics\nfrom PIL import Image\nfrom torchvision.transforms import functional as transfunc\nimport rich\nc = rich.get_console()\n\n\ndef PracticalAttack(argv):\n '''\n Attack a pre-trained model\n '''\n ag = argparse.ArgumentParser()\n ag.add_argument('-A', '--attack', type=str, default='SPSA')\n ag.add_argument('-e', '--epsilon', default=1./255., type=float)\n '''\n A NOTE ON SELECTION OF EPSILON (For Attacking JDModel)\n\n 0.062 (16/255) -> top5 go out of sight with little exception\n 0.031 ( 8/255) -> 1-of-top5 does not go out of sight\n 0.015 ( 4/255) -> 3-of-top5 does not go out of sight\n 0.008 ( 2/255) -> top5 within sight but not close to each other\n 0.004 ( 1/255) -> quite good. (and cannot be lower)\n\n For BingModel\n 1/255 -> topk very persistent\n 2/255 -> top3 very persistent\n 4/255 -> top1 starts to vary\n 8/255 -> looks appropriate.\n '''\n ag.add_argument('-M', '--model', type=str, choices=['JDModel', 'BingModel'])\n ag.add_argument('-v', '--verbose', action='store_true', help='verbose?')\n ag.add_argument('-Q', '--qbudget', type=int, default=500, help='query budget')\n ag.add_argument('-k', '--topk', type=int, default=5, help='generate permutation for topk')\n ag.add_argument('-c', '--canseek', type=int, default=50, help='length of returned ranking list')\n ag.add_argument('-l', '--payload', type=str, required=True, help='path to the payload image')\n ag.add_argument('-V', '--visualize', action='store_true')\n ag.add_argument('-O', '--oneshot', action='store_true')\n ag.add_argument('--randperm', action='store_true', help='use a random permutation instead')\n ag = ag.parse_args(argv)\n cprint(json.dumps(vars(ag), indent=4), 'yellow')\n\n # Process the arguments\n if ag.epsilon > 1.0:\n ag.epsilon = ag.epsilon / 255.\n assert(ag.topk > 1)\n\n # Load the payload image\n image = Image.open(ag.payload, mode='r').resize((224,224), Image.ANTIALIAS)\n query = transfunc.to_tensor(image).clone().unsqueeze(0)\n print(f'* Payload Image Info: shape={query.shape}')\n #tmp = transfunc.to_pil_image(query.squeeze(), mode='RGB')\n #tmp.show()\n #input('2')\n\n # Load the target model\n cprint(f'Setting up the \"{ag.model}\" Model')\n if ag.model == 'JDModel':\n model = getattr(lib.snapshop, ag.model)(canseek=ag.canseek)\n elif ag.model == 'BingModel':\n model = getattr(lib.bing, ag.model)(canseek=ag.canseek)\n else:\n raise ValueError('unsupported model')\n print(model)\n\n # Start attacking\n cprint(f'>_< Starting {ag.attack} Attack with Epsilon = {ag.epsilon:.3f}',\n 'red', None, ['bold', 'underline'])\n argsort, _ = model(query, id='init')\n orig_argsort = argsort.clone().detach()\n if not ag.randperm:\n rperm = th.LongTensor([1, 5, 4, 3, 2]) - 1 # manually specified order\n else:\n rperm = np.arange(ag.topk)\n np.random.shuffle(rperm)\n rperm = th.from_numpy(rperm)\n otopk = argsort[:len(rperm)]\n rtopk = otopk[rperm]\n cprint(f'> Original CanSee\\n {argsort.tolist()}', 'cyan')\n cprint(f'> Original TopK {otopk}', 'green')\n cprint(f'> Attacker Rperm {rperm}', 'yellow')\n cprint(f'> Expected TopK {rtopk}', 'red')\n if ag.oneshot:\n print('Exiting as requested oneshot mode.')\n exit(0)\n\n qr, r, score, mrank, aux = getattr(reorder, ag.attack)(model, query, rperm,\n eps=ag.epsilon, parallel=1, maxprobe=ag.qbudget, verbose=True)\n #argsort, _ = model(query, id='final')\n 
#cprint(f'> FINAL TopK', 'red')\n #cprint(argsort.tolist(), 'cyan')\n c.print('Final score:', score)\n\n\nif __name__ == '__main__':\n PracticalAttack(sys.argv[1:])\n", "repo_name": "cdluminate/advorder", "sub_path": "PracticalOA.py", "file_name": "PracticalOA.py", "file_ext": "py", "file_size_in_byte": 3874, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 6, "dataset": "github-code", "pt": "73", "api": [{"api_name": "rich.get_console", "line_number": 11, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 18, "usage_type": "call"}, {"api_name": "termcolor.cprint", "line_number": 46, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 46, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 54, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 54, "usage_type": "name"}, {"api_name": "PIL.Image.ANTIALIAS", "line_number": 54, "usage_type": "attribute"}, {"api_name": "torchvision.transforms.functional.to_tensor", "line_number": 55, "usage_type": "call"}, {"api_name": "torchvision.transforms.functional", "line_number": 55, "usage_type": "name"}, {"api_name": "termcolor.cprint", "line_number": 62, "usage_type": "call"}, {"api_name": "lib.snapshop", "line_number": 64, "usage_type": "attribute"}, {"api_name": "lib.bing", "line_number": 66, "usage_type": "attribute"}, {"api_name": "termcolor.cprint", "line_number": 72, "usage_type": "call"}, {"api_name": "torch.LongTensor", "line_number": 77, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 79, "usage_type": "call"}, {"api_name": "numpy.random.shuffle", "line_number": 80, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 80, "usage_type": "attribute"}, {"api_name": "torch.from_numpy", "line_number": 81, "usage_type": "call"}, {"api_name": "termcolor.cprint", "line_number": 84, "usage_type": "call"}, {"api_name": "termcolor.cprint", "line_number": 85, "usage_type": "call"}, {"api_name": "termcolor.cprint", "line_number": 86, "usage_type": "call"}, {"api_name": "termcolor.cprint", "line_number": 87, "usage_type": "call"}, {"api_name": "lib.reorder", "line_number": 92, "usage_type": "argument"}, {"api_name": "sys.argv", "line_number": 101, "usage_type": "attribute"}]}
+{"seq_id": "8839949787", "text": "# USAGE\n# python pi_detect_drowsiness.py --cascade haarcascade_frontalface_default.xml --shape-predictor shape_predictor_68_face_landmarks.dat\n# python pi_detect_drowsiness.py --cascade haarcascade_frontalface_default.xml --shape-predictor shape_predictor_68_face_landmarks.dat --alarm 1\n\n# import the necessary packages\nimport sys\nsys.path.insert(0, \"/usr/local/lib/python3.5/dist-packages\")\nfrom picamera.array import PiRGBArray\nfrom picamera import PiCamera\nfrom imutils.video import VideoStream\nfrom imutils import face_utils\nimport numpy as np\nimport argparse\nimport imutils\nimport time\nimport dlib\nimport cv2\nimport RPi.GPIO as GPIO\nimport time\nimport threading\n\ndef pin_int():\n\tglobal speaker,led_system,led_status,chair_mode1,chair_mode2\n\tspeaker=11\n\tled_system=33\n\tled_status=13\n\tchair_mode1=29\n\tchair_mode2=31\n\tGPIO.setmode(GPIO.BOARD)\n\tGPIO.setup(speaker,GPIO.OUT, initial = 0)\n\tGPIO.setup(led_system,GPIO.OUT, initial = 0)\n\tGPIO.setup(led_status,GPIO.OUT, initial = 1)\n\tGPIO.setup(chair_mode1,GPIO.OUT, initial = 0)\n\tGPIO.setup(chair_mode2,GPIO.OUT, initial = 0)\n#==========Define actions function==========================\nglobal count\nglobal x,y,detect_mode,old_status\ncount=0\ndetect_mode=0\nold_status=0\ndef led_blinking(count,pulse):\n\tfor i in range(count):\n\t\ttime.sleep(pulse)\n\t\tGPIO.output(led_system,GPIO.HIGH)\n\t\ttime.sleep(pulse)\n\t\tGPIO.output(led_system,GPIO.LOW)\ndef status_blinking():\n\t#detect_mode=0\n\t#if (detect_mode==0):\n\ttime.sleep(0.5)\n\tGPIO.output(led_status,GPIO.LOW)\n\ttime.sleep(0.5)\n\tGPIO.output(led_status,GPIO.HIGH)\ndef speaker_alert(count,pulse):\n\tfor i in range(count):\n\t\ttime.sleep(pulse)\n\t\tGPIO.output(speaker,GPIO.HIGH)\n\t\ttime.sleep(pulse+0.3)\n\t\tGPIO.output(speaker,GPIO.LOW)\ndef led_blinking(count,pulse):\n\tfor i in range(count):\n\t\ttime.sleep(pulse)\n\t\tGPIO.output(led_system,GPIO.HIGH)\n\t\ttime.sleep(pulse)\n\t\tGPIO.output(led_system,GPIO.LOW)\n\n\n\ndef drownside_alert():\n\tglobal old_status\n\tprint (\"=========Starting Alert System=============\")\n\twhile True:\n\t\tif (old_status == 1):\n\t\t\tprint ( \"=Downside detected==\" )\n\t\t\tprint ( \"======>Triggered Speaker==\")\n\t\t\tGPIO.output(led_status,GPIO.LOW)\n\t\t\tGPIO.output(chair_mode2,GPIO.HIGH)\n\t\t\tGPIO.output(chair_mode1,GPIO.LOW)\n\t\t\tfor i in range(3):\n\t\t\t\ttime.sleep(0.2)\n\t\t\t\tGPIO.output(speaker,GPIO.HIGH)\n\t\t\t\tGPIO.output(led_system,GPIO.HIGH)\n\t\t\t\ttime.sleep(0.2+0.3)\n\t\t\t\tGPIO.output(led_system,GPIO.LOW)\n\t\t\t\tGPIO.output(speaker,GPIO.LOW)\n\t\t\tGPIO.output(chair_mode2,GPIO.LOW)\n\t\t#time.sleep(5)\n\t\tif (old_status == 2):\n \tprint ( \"==== No Downside detected ========\" )\n \t\tprint ( \"==== >Alert System Stopped\" )\n \tGPIO.output(speaker,GPIO.LOW)\n \tGPIO.output(led_system,GPIO.LOW)\n \tGPIO.output(led_status,GPIO.HIGH)\n \tGPIO.output(chair_mode1,GPIO.LOW)\n \tGPIO.output(chair_mode2,GPIO.LOW)\n\t\tif (old_status==0):\n\t\t\tstatus_blinking()\n\n#===========================================================\ndef euclidean_dist(ptA, ptB):\n\t# compute and return the euclidean distance between the two\n\t# points\n\treturn np.linalg.norm(ptA - ptB)\n\ndef eye_aspect_ratio(eye):\n\t# compute the euclidean distances between the two sets of\n\t# vertical eye landmarks (x, y)-coordinates\n\tA = euclidean_dist(eye[1], eye[5])\n\tB = euclidean_dist(eye[2], eye[4])\n\n\t# compute the euclidean distance between the horizontal\n\t# eye 
landmark (x, y)-coordinates\n\tC = euclidean_dist(eye[0], eye[3])\n\n\t# compute the eye aspect ratio\n\tear = (A + B) / (2.0 * C)\n\n\t# return the eye aspect ratio\n\treturn ear\n \n# construct the argument parse and parse the arguments\nap = argparse.ArgumentParser()\nap.add_argument(\"-c\", \"--cascade\", required=True,\n\thelp = \"path to where the face cascade resides\")\nap.add_argument(\"-p\", \"--shape-predictor\", required=True,\n\thelp=\"path to facial landmark predictor\")\nap.add_argument(\"-a\", \"--alarm\", type=int, default=0,\n\thelp=\"boolean used to indicate if TraffHat should be used\")\nargs = vars(ap.parse_args())\n\n# check to see if we are using GPIO/TrafficHat as an alarm\n#if args[\"alarm\"] > 0:\n#\tfrom gpiozero import TrafficHat\n#\tth = TrafficHat()\n#\tprint(\"[INFO] using TrafficHat alarm...\")\n \n# define two constants, one for the eye aspect ratio to indicate\n# blink and then a second constant for the number of consecutive\n# frames the eye must be below the threshold for to set off the\n# alarm\nEYE_AR_THRESH = 0.3 # do nhay cua mat , mat nho thi tang len mat to thi giam xuong tu 0.2 den 0.33\nEYE_AR_CONSEC_FRAMES = 4 # so lan do mat nham, muon nhanh thi giam ve 2 muon cham thi tang len 8 -10 \n# xong nho save file roi ra desktop chay\n\n# initialize the frame counter as well as a boolean used to\n# indicate if the alarm is going off\nglobal COUNTER,ALARM_ON,status_ret\nCOUNTER=0\nALARM_ON= False\nstatus_ret=0\n# load OpenCV's Haar cascade for face detection (which is faster than\n# dlib's built-in HOG detector, but less accurate), then create the\n# facial landmark predictor\nprint(\"[INFO] loading facial landmark predictor...\")\ndetector = cv2.CascadeClassifier(args[\"cascade\"])\npredictor = dlib.shape_predictor(args[\"shape_predictor\"])\n\n# grab the indexes of the facial landmarks for the left and\n# right eye, respectively\n(lStart, lEnd) = face_utils.FACIAL_LANDMARKS_IDXS[\"left_eye\"]\n(rStart, rEnd) = face_utils.FACIAL_LANDMARKS_IDXS[\"right_eye\"]\n\n# initialize the camera and grab a reference to the raw camera capture\n#camera = PiCamera()\n#camera.resolution = (640, 480)\n#camera.framerate = 32\n#vs= PiRGBArray(camera, size=(640, 480))\n#=============intit GPIO PIN================\npin_int()\nprint(\"[INFO] GPIO Init...\")\nx = threading.Thread(target=drownside_alert)\nx.start()\n#===========================================\n# start the video stream thread\n#==============================================\nprint(\"[INFO] starting video stream thread...\")\n#vs = VideoStream(src=0).start()\nvs = VideoStream(usePiCamera=True,resolution=(640,480),framerate=32).start()\n#vs= PiRGBArray(camera, size=(640, 480))\ntime.sleep(1.0)\n\n# loop over frames from the video stream\nwhile True:\n\t# grab the frame from the threaded video file stream, resize\n\t# it, and convert it to grayscale\n\t# channels)\n\t\n\tframe = vs.read()\n\tframe = imutils.resize(frame, width=450)\n\tgray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n\n\t# detect faces in the grayscale frame\n\trects = detector.detectMultiScale(gray, scaleFactor=1.1, \n\t\tminNeighbors=5, minSize=(30, 30),\n\t\tflags=cv2.CASCADE_SCALE_IMAGE)\n\n\t# loop over the face detections\n\tfor (x, y, w, h) in rects:\n\t\t# construct a dlib rectangle object from the Haar cascade\n\t\t# bounding box\n\t\trect = dlib.rectangle(int(x), int(y), int(x + w),\n\t\t\tint(y + h))\n\n\t\t# determine the facial landmarks for the face region, then\n\t\t# convert the facial landmark (x, y)-coordinates to a NumPy\n\t\t# 
array\n\t\tshape = predictor(gray, rect)\n\t\tshape = face_utils.shape_to_np(shape)\n\n\t\t# extract the left and right eye coordinates, then use the\n\t\t# coordinates to compute the eye aspect ratio for both eyes\n\t\tleftEye = shape[lStart:lEnd]\n\t\trightEye = shape[rStart:rEnd]\n\t\tleftEAR = eye_aspect_ratio(leftEye)\n\t\trightEAR = eye_aspect_ratio(rightEye)\n\n\t\t# average the eye aspect ratio together for both eyes\n\t\tear = (leftEAR + rightEAR) / 2.0\n\n\t\t# compute the convex hull for the left and right eye, then\n\t\t# visualize each of the eyes\n\t\tleftEyeHull = cv2.convexHull(leftEye)\n\t\trightEyeHull = cv2.convexHull(rightEye)\n\t\tcv2.drawContours(frame, [leftEyeHull], -1, (0, 255, 0), 1)\n\t\tcv2.drawContours(frame, [rightEyeHull], -1, (0, 255, 0), 1)\n\n\t\t# check to see if the eye aspect ratio is below the blink\n\t\t# threshold, and if so, increment the blink frame counter\n\t\tif ear < EYE_AR_THRESH:\n\t\t\tCOUNTER += 1\n\n\t\t\t# if the eyes were closed for a sufficient number of\n\t\t\t# frames, then sound the alarm\n\t\t\tif COUNTER >= EYE_AR_CONSEC_FRAMES:\n\t\t\t\t# if the alarm is not on, turn it on\n#\t\t\t\tif not ALARM_ON:\n#\t\t\t\t\tALARM_ON = True\n#\n#\t\t\t\t\t# check to see if the TrafficHat buzzer should\n#\t\t\t\t\t# be sounded\n#\t\t\t\t\tif args[\"alarm\"] > 0:\n#\t\t\t\t\t\tth.buzzer.blink(0.1, 0.1, 10,\n#\t\t\t\t\t\t\tbackground=True)\n\t\t\t\t# draw an alarm on the frame\n\t\t\t\t#ALARM_ON = True\n\t\t\t\tdetect_mode=1\n\t\t\t\tstatus_ret=1\n\t\t\t\tcv2.putText(frame, \"DROWSINESS ALERT!\", (10, 30),\n\t\t\t\tcv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)\n\t\t\t\tif (detect_mode==1):\n\t\t\t\t\tif (detect_mode != old_status):\n\t\t\t\t\t\told_status=detect_mode\n\t\t\t\t\t\t#x = threading.Thread(target=drownside_alert, args=[1,0,])\n\t\t\t\t\t\t#x.start()\n\t\t\t\t\t\t#COUNTER = 0\n\t\t# otherwise, the eye aspect ratio is not below the blink\n\t\t# threshold, so reset the counter and alarm\n\t\telse:\n\t\t\tCOUNTER = 0\n\t\t\tif (detect_mode==1):\n\t\t\t\t#ALARM_ON = False\n\t\t\t\tdetect_mode=3\n\t\t\t\told_status=0\n\t\t\t\t#x = threading.Thread(target=drownside_alert, args=[2,0,])\n\t\t\t\t#x.start()\n\t\t\t\tdetect_mode=0\n\t\t# draw the computed eye aspect ratio on the frame to help\n\t\t# with debugging and setting the correct eye aspect ratio\n\t\t# thresholds and frame counter\n\t\tcv2.putText(frame, \"EAR: {:.3f}\".format(ear), (300, 30),\n\t\tcv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)\n \n\t# show the frame\n\tcv2.imshow(\"Frame\", frame)\t\n\t#if (detect_mode==0):\n\t\t#status_blinking()\n\t#GPIO.output(led_status,GPIO.LOW)\n\t#time.sleep(0.5)\n\tkey = cv2.waitKey(1) & 0xFF\n\t#GPIO.output(led_status,GPIO.HIGH)\n\t# if the `q` key was pressed, break from the loop\n\t#if key == ord(\"q\"):\n\t#\tbreak\n\n# do a bit of cleanup\n#cv2.destroyAllWindows()\nvs.stop()\n\n", "repo_name": "LuizKun/Drownside_detection", "sub_path": "FaBo9AXIS-MPU9250-Python/example/pi_detect_drowsiness.py", "file_name": "pi_detect_drowsiness.py", "file_ext": "py", "file_size_in_byte": 9337, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "73", "api": [{"api_name": "sys.path.insert", "line_number": 7, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 7, "usage_type": "attribute"}, {"api_name": "RPi.GPIO.setmode", "line_number": 29, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 29, "usage_type": "name"}, {"api_name": "RPi.GPIO.BOARD", "line_number": 29, "usage_type": 
"attribute"}, {"api_name": "RPi.GPIO.setup", "line_number": 30, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 30, "usage_type": "name"}, {"api_name": "RPi.GPIO.OUT", "line_number": 30, "usage_type": "attribute"}, {"api_name": "RPi.GPIO.setup", "line_number": 31, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 31, "usage_type": "name"}, {"api_name": "RPi.GPIO.OUT", "line_number": 31, "usage_type": "attribute"}, {"api_name": "RPi.GPIO.setup", "line_number": 32, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 32, "usage_type": "name"}, {"api_name": "RPi.GPIO.OUT", "line_number": 32, "usage_type": "attribute"}, {"api_name": "RPi.GPIO.setup", "line_number": 33, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 33, "usage_type": "name"}, {"api_name": "RPi.GPIO.OUT", "line_number": 33, "usage_type": "attribute"}, {"api_name": "RPi.GPIO.setup", "line_number": 34, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 34, "usage_type": "name"}, {"api_name": "RPi.GPIO.OUT", "line_number": 34, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 43, "usage_type": "call"}, {"api_name": "RPi.GPIO.output", "line_number": 44, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 44, "usage_type": "name"}, {"api_name": "RPi.GPIO.HIGH", "line_number": 44, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 45, "usage_type": "call"}, {"api_name": "RPi.GPIO.output", "line_number": 46, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 46, "usage_type": "name"}, {"api_name": "RPi.GPIO.LOW", "line_number": 46, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 50, "usage_type": "call"}, {"api_name": "RPi.GPIO.output", "line_number": 51, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 51, "usage_type": "name"}, {"api_name": "RPi.GPIO.LOW", "line_number": 51, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 52, "usage_type": "call"}, {"api_name": "RPi.GPIO.output", "line_number": 53, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 53, "usage_type": "name"}, {"api_name": "RPi.GPIO.HIGH", "line_number": 53, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 56, "usage_type": "call"}, {"api_name": "RPi.GPIO.output", "line_number": 57, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 57, "usage_type": "name"}, {"api_name": "RPi.GPIO.HIGH", "line_number": 57, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 58, "usage_type": "call"}, {"api_name": "RPi.GPIO.output", "line_number": 59, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 59, "usage_type": "name"}, {"api_name": "RPi.GPIO.LOW", "line_number": 59, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 62, "usage_type": "call"}, {"api_name": "RPi.GPIO.output", "line_number": 63, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 63, "usage_type": "name"}, {"api_name": "RPi.GPIO.HIGH", "line_number": 63, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 64, "usage_type": "call"}, {"api_name": "RPi.GPIO.output", "line_number": 65, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 65, "usage_type": "name"}, {"api_name": "RPi.GPIO.LOW", "line_number": 65, "usage_type": "attribute"}, {"api_name": "RPi.GPIO.output", "line_number": 76, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 76, "usage_type": "name"}, {"api_name": 
"RPi.GPIO.LOW", "line_number": 76, "usage_type": "attribute"}, {"api_name": "RPi.GPIO.output", "line_number": 77, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 77, "usage_type": "name"}, {"api_name": "RPi.GPIO.HIGH", "line_number": 77, "usage_type": "attribute"}, {"api_name": "RPi.GPIO.output", "line_number": 78, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 78, "usage_type": "name"}, {"api_name": "RPi.GPIO.LOW", "line_number": 78, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 80, "usage_type": "call"}, {"api_name": "RPi.GPIO.output", "line_number": 81, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 81, "usage_type": "name"}, {"api_name": "RPi.GPIO.HIGH", "line_number": 81, "usage_type": "attribute"}, {"api_name": "RPi.GPIO.output", "line_number": 82, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 82, "usage_type": "name"}, {"api_name": "RPi.GPIO.HIGH", "line_number": 82, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 83, "usage_type": "call"}, {"api_name": "RPi.GPIO.output", "line_number": 84, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 84, "usage_type": "name"}, {"api_name": "RPi.GPIO.LOW", "line_number": 84, "usage_type": "attribute"}, {"api_name": "RPi.GPIO.output", "line_number": 85, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 85, "usage_type": "name"}, {"api_name": "RPi.GPIO.LOW", "line_number": 85, "usage_type": "attribute"}, {"api_name": "RPi.GPIO.output", "line_number": 86, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 86, "usage_type": "name"}, {"api_name": "RPi.GPIO.LOW", "line_number": 86, "usage_type": "attribute"}, {"api_name": "RPi.GPIO.output", "line_number": 91, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 91, "usage_type": "name"}, {"api_name": "RPi.GPIO.LOW", "line_number": 91, "usage_type": "attribute"}, {"api_name": "RPi.GPIO.output", "line_number": 92, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 92, "usage_type": "name"}, {"api_name": "RPi.GPIO.LOW", "line_number": 92, "usage_type": "attribute"}, {"api_name": "RPi.GPIO.output", "line_number": 93, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 93, "usage_type": "name"}, {"api_name": "RPi.GPIO.HIGH", "line_number": 93, "usage_type": "attribute"}, {"api_name": "RPi.GPIO.output", "line_number": 94, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 94, "usage_type": "name"}, {"api_name": "RPi.GPIO.LOW", "line_number": 94, "usage_type": "attribute"}, {"api_name": "RPi.GPIO.output", "line_number": 95, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 95, "usage_type": "name"}, {"api_name": "RPi.GPIO.LOW", "line_number": 95, "usage_type": "attribute"}, {"api_name": "numpy.linalg.norm", "line_number": 103, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 103, "usage_type": "attribute"}, {"api_name": "argparse.ArgumentParser", "line_number": 122, "usage_type": "call"}, {"api_name": "cv2.CascadeClassifier", "line_number": 155, "usage_type": "call"}, {"api_name": "dlib.shape_predictor", "line_number": 156, "usage_type": "call"}, {"api_name": "imutils.face_utils.FACIAL_LANDMARKS_IDXS", "line_number": 160, "usage_type": "attribute"}, {"api_name": "imutils.face_utils", "line_number": 160, "usage_type": "name"}, {"api_name": "imutils.face_utils.FACIAL_LANDMARKS_IDXS", "line_number": 161, "usage_type": "attribute"}, {"api_name": "imutils.face_utils", "line_number": 161, 
"usage_type": "name"}, {"api_name": "threading.Thread", "line_number": 171, "usage_type": "call"}, {"api_name": "imutils.video.VideoStream", "line_number": 178, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 180, "usage_type": "call"}, {"api_name": "imutils.resize", "line_number": 189, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 190, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 190, "usage_type": "attribute"}, {"api_name": "cv2.CASCADE_SCALE_IMAGE", "line_number": 195, "usage_type": "attribute"}, {"api_name": "dlib.rectangle", "line_number": 201, "usage_type": "call"}, {"api_name": "imutils.face_utils.shape_to_np", "line_number": 208, "usage_type": "call"}, {"api_name": "imutils.face_utils", "line_number": 208, "usage_type": "name"}, {"api_name": "cv2.convexHull", "line_number": 222, "usage_type": "call"}, {"api_name": "cv2.convexHull", "line_number": 223, "usage_type": "call"}, {"api_name": "cv2.drawContours", "line_number": 224, "usage_type": "call"}, {"api_name": "cv2.drawContours", "line_number": 225, "usage_type": "call"}, {"api_name": "cv2.putText", "line_number": 248, "usage_type": "call"}, {"api_name": "cv2.FONT_HERSHEY_SIMPLEX", "line_number": 249, "usage_type": "attribute"}, {"api_name": "cv2.putText", "line_number": 270, "usage_type": "call"}, {"api_name": "cv2.FONT_HERSHEY_SIMPLEX", "line_number": 271, "usage_type": "attribute"}, {"api_name": "cv2.imshow", "line_number": 274, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 279, "usage_type": "call"}]}
+{"seq_id": "43964646389", "text": "import pygame\nfrom objects.ball import Ball\nfrom objects.square import Square\nfrom funcs import *\nimport random\nballs_sp = []\n\n\ndef draw_line(pos):\n if pos[1] >= screen_height - 100 or pos == [10000000000, 10000000]:\n return\n k, b = get_k_and_b((x0, y0), pos)\n final = (-b // k, 0)\n pygame.draw.line(screen, pygame.Color(\"white\"), (x0, y0), final, width=1)\n\n\ndef spawn_balls(pos):\n balls_sp.append(Ball((x0, y0), pos, screen, screen_size))\n\n\ndef draw_lines_among_balls():\n if len(balls_sp) >= 2:\n for ball in balls_sp[1:]:\n for ball2 in balls_sp:\n pygame.draw.line(screen, pygame.Color('white'), ball2.get_coords(), ball.get_coords())\n\n\nif __name__ == '__main__':\n pygame.init()\n screen_size = screen_width, screen_height = 1000, 1000\n screen = pygame.display.set_mode(screen_size)\n screen.fill(pygame.Color('black'))\n running = True\n clock = pygame.time.Clock()\n pos = [10000000000, 10000000]\n x0 = screen_width // 2\n y0 = screen_height - 100\n need_draw_lines_among_balls = False\n need_draw_poligon = False\n need_draw_blocks = False\n squares = []\n while running:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n running = False\n if event.type == pygame.MOUSEMOTION:\n pos = event.pos\n if event.type == pygame.MOUSEBUTTONDOWN and pos[1] < y0:\n spawn_balls(pos)\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_UP or event.key == pygame.K_w:\n y0 -= 100\n y0 -= 100\n if y0 <= 0:\n y0 = 0\n balls_sp.clear()\n elif event.key == pygame.K_DOWN or event.key == pygame.K_s:\n y0 += 100\n if y0 >= screen_height:\n y0 = screen_height\n elif event.key == pygame.K_c:\n balls_sp.clear()\n elif event.key == pygame.K_g:\n need_draw_lines_among_balls = True if need_draw_lines_among_balls is False else False\n elif event.key == pygame.K_t:\n need_draw_poligon = True if need_draw_poligon is False else False\n elif event.key == pygame.K_b:\n squares = []\n need_draw_blocks = True if need_draw_blocks is False else False\n\n clock.tick(600)\n pygame.display.flip()\n screen.fill(pygame.Color('black'))\n pygame.draw.line(screen, pygame.Color('white'), (0, y0), (screen_width, y0), width=3)\n if need_draw_blocks and len(squares) == 0:\n squares = []\n for _ in range(10):\n squares.append(Square((random.randint(0, 700), random.randint(0, 600)), 100))\n if need_draw_lines_among_balls:\n draw_lines_among_balls()\n if need_draw_poligon and len(balls_sp) >= 3:\n pygame.draw.polygon(screen, pygame.Color('white'), [ball.get_coords() for ball in balls_sp])\n for square in squares:\n pygame.draw.rect(screen, pygame.Color('white'), square.rect)\n if pos[1] < y0:\n draw_line(pos)\n if balls_sp:\n for ball in balls_sp:\n ball.move(y0, squares)\n ball.render()\n pygame.quit()", "repo_name": "DulmievMusa/Balls_shooter", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 3391, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "73", "api": [{"api_name": "pygame.draw.line", "line_number": 14, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 14, "usage_type": "attribute"}, {"api_name": "pygame.Color", "line_number": 14, "usage_type": "call"}, {"api_name": "objects.ball.Ball", "line_number": 18, "usage_type": "call"}, {"api_name": "pygame.draw.line", "line_number": 25, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 25, "usage_type": "attribute"}, {"api_name": "pygame.Color", "line_number": 25, "usage_type": "call"}, 
{"api_name": "pygame.init", "line_number": 29, "usage_type": "call"}, {"api_name": "pygame.display.set_mode", "line_number": 31, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 31, "usage_type": "attribute"}, {"api_name": "pygame.Color", "line_number": 32, "usage_type": "call"}, {"api_name": "pygame.time.Clock", "line_number": 34, "usage_type": "call"}, {"api_name": "pygame.time", "line_number": 34, "usage_type": "attribute"}, {"api_name": "pygame.event.get", "line_number": 43, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 43, "usage_type": "attribute"}, {"api_name": "pygame.QUIT", "line_number": 44, "usage_type": "attribute"}, {"api_name": "pygame.MOUSEMOTION", "line_number": 46, "usage_type": "attribute"}, {"api_name": "pygame.MOUSEBUTTONDOWN", "line_number": 48, "usage_type": "attribute"}, {"api_name": "pygame.KEYDOWN", "line_number": 50, "usage_type": "attribute"}, {"api_name": "pygame.K_UP", "line_number": 51, "usage_type": "attribute"}, {"api_name": "pygame.K_w", "line_number": 51, "usage_type": "attribute"}, {"api_name": "pygame.K_DOWN", "line_number": 57, "usage_type": "attribute"}, {"api_name": "pygame.K_s", "line_number": 57, "usage_type": "attribute"}, {"api_name": "pygame.K_c", "line_number": 61, "usage_type": "attribute"}, {"api_name": "pygame.K_g", "line_number": 63, "usage_type": "attribute"}, {"api_name": "pygame.K_t", "line_number": 65, "usage_type": "attribute"}, {"api_name": "pygame.K_b", "line_number": 67, "usage_type": "attribute"}, {"api_name": "pygame.display.flip", "line_number": 72, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 72, "usage_type": "attribute"}, {"api_name": "pygame.Color", "line_number": 73, "usage_type": "call"}, {"api_name": "pygame.draw.line", "line_number": 74, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 74, "usage_type": "attribute"}, {"api_name": "pygame.Color", "line_number": 74, "usage_type": "call"}, {"api_name": "objects.square.Square", "line_number": 78, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 78, "usage_type": "call"}, {"api_name": "pygame.draw.polygon", "line_number": 82, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 82, "usage_type": "attribute"}, {"api_name": "pygame.Color", "line_number": 82, "usage_type": "call"}, {"api_name": "pygame.draw.rect", "line_number": 84, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 84, "usage_type": "attribute"}, {"api_name": "pygame.Color", "line_number": 84, "usage_type": "call"}, {"api_name": "pygame.quit", "line_number": 91, "usage_type": "call"}]}
+{"seq_id": "74792454635", "text": "from flask import Flask,jsonify,request\r\n\r\napp = Flask(__name__)\r\nalunos = []\r\nprofessorres = []\r\ndisciplinas = []\r\nofertadas = []\r\n@app.route('/alunos')\r\ndef retorna_alunos():\r\n return jsonify(alunos)\r\n@app.route('/alunos', methods=['POST'])\r\ndef add_aluno():\r\n new = request.json\r\n if 'nome' not in new.keys():\r\n return jsonify({'erro':'aluno sem nome'}),400\r\n for aluno in alunos:\r\n if aluno['id'] == new['id']:\r\n return jsonify({'erro':'id ja utilizada'}),400\r\n \r\n alunos.append(request.json)\r\n return jsonify(alunos),200\r\n \r\n@app.route('/alunos/')\r\ndef retorna_aluno_id(id):\r\n for aluno in alunos:\r\n if aluno['id'] == id:\r\n return jsonify(aluno) \r\n return jsonify({'erro':'aluno nao encontrado',}),400\r\n@app.route('/reseta',methods=['POST'])\r\ndef reseta_():\r\n alunos.clear()\r\n professorres.clear()\r\n disciplinas.clear()\r\n ofertadas.clear()\r\n return jsonify({'ok':'resetado com sucesso'}),200\r\n@app.route('/alunos/',methods=['DELETE'])\r\ndef delete_aluno(id):\r\n for index,aluno in enumerate(alunos):\r\n if aluno['id'] == id:\r\n del alunos[index]\r\n return\r\n return jsonify({'erro':'aluno nao encontrado'}),400\r\n@app.route(\"/alunos/\",methods=['PUT'])\r\ndef edita(id):\r\n dados = request.json\r\n if 'nome' not in dados.keys():\r\n return jsonify({'erro':'aluno sem nome'}),400\r\n for aluno in alunos:\r\n if aluno['id'] == id:\r\n aluno['nome'] = dados['nome']\r\n return jsonify(alunos)\r\n \r\n return jsonify({'erro':'aluno nao encontrado',}),400\r\n@app.route(\"/professores\",methods=['GET'])\r\ndef professor_show():\r\n return jsonify(professorres)\r\n@app.route(\"/professores\",methods=['POST'])\r\ndef add_prof():\r\n prof=request.json\r\n if 'nome' in prof.keys():\r\n for professor in professorres:\r\n if professor['id'] == prof['id']:\r\n return jsonify({'erro':'id ja utilizada'}),400\r\n professorres.append(prof)\r\n return jsonify({}),200\r\n else:\r\n return jsonify({'erro':'professor sem nome'}),400 \r\n@app.route('/professores/')\r\ndef retorna_professor(id):\r\n for professor in professorres:\r\n if professor['id'] == id:\r\n return jsonify(professor)\r\n return jsonify({'erro':'professor nao encontrado'}),400\r\n@app.route('/professores/',methods=['DELETE'])\r\ndef delete_prof(id):\r\n\r\n for index,prof in enumerate(professorres):\r\n if prof['id'] == id :\r\n del professorres[index]\r\n return \r\n return jsonify({'erro':'professor nao encontrado'}),400\r\n@app.route('/professores/',methods=['PUT'])\r\ndef edita_prof(id):\r\n g = request.json\r\n if 'nome' in g.keys():\r\n for professor in professorres:\r\n if professor['id'] == id:\r\n professor['nome'] = g['nome']\r\n return jsonify(professor)\r\n return jsonify({'erro':'professor nao encontrado'}),400\r\n else:\r\n return jsonify({'erro':'professor sem nome'}),400\r\n@app.route('/disciplinas',methods=['GET'])\r\ndef diciplinas_retorno():\r\n return jsonify( disciplinas)\r\n@app.route('/disciplinas',methods=['POST'])\r\ndef add_diciplinas():\r\n dados = request.json\r\n if 'nome' not in dados.keys() or 'id' not in dados.keys() or 'carga_horaria' not in dados.keys() or 'plano_ensino' not in dados.keys() or 'status' not in dados.keys():\r\n return jsonify({'erro':'diciplina sem nome'}),400\r\n for disciplina in disciplinas:\r\n if disciplina['id'] == dados['id']:\r\n return jsonify({'erro':'id ja utilizada'}),400\r\n \r\n else:\r\n disciplinas.append(dados)\r\n return 
jsonify(),200\r\n@app.route('/disciplinas/',methods=['GET'])\r\ndef diciplina_id(id):\r\n for diciplina in disciplinas:\r\n if diciplina['id'] == id:\r\n return jsonify(diciplina)\r\n return jsonify({'erro':'disciplina nao encontrada'}),400\r\n@app.route('/disciplinas/',methods=['DELETE'])\r\ndef delete_diciplina(id):\r\n for index,diciplina in enumerate(disciplinas):\r\n if diciplina['id'] == id:\r\n del disciplinas[index]\r\n return 'Diciplina deletada com sucesso'\r\n return jsonify({'erro':'disciplina nao encontrada'}),400\r\n@app.route('/disciplinas/',methods=['PUT'])\r\ndef edita_diciplina(id):\r\n dados=request.json\r\n for diciplina in disciplinas:\r\n if diciplina['id'] == id:\r\n if 'nome' in dados.keys():\r\n diciplina['nome'] = dados['nome']\r\n if 'status' in dados.keys():\r\n disciplina['status'] = dados['status']\r\n if 'plano_ensino' in dados.keys():\r\n disciplina['plano_ensino'] = dados['plano_ensino']\r\n if 'carga_horaria' in dados.keys():\r\n disciplina['carga_horaria'] = dados['carga_horaria']\r\n return 'Diciplina Atualizada com sucesso'\r\n return jsonify({'erro':'disciplina nao encontrada'}),400\r\n@app.route(\"/ofertadas\",methods=['GET'])\r\ndef retrona_ofertada():\r\n return jsonify(ofertadas)\r\n@app.route('/ofertadas',methods=['POST'])\r\ndef add_ofertada():\r\n dados = request.json\r\n\r\n if 'id_professor' in dados.keys():\r\n professor_valido = False\r\n ok_add = True\r\n for professor in professorres:\r\n if professor['id'] == dados['id_professor']:\r\n professor_valido = True\r\n if professor_valido == False:\r\n return jsonify({'erro' : 'id professor invalido'}),400\r\n print(len(dados.keys()))\r\n if len(dados.keys()) >=5:\r\n for ofertada in ofertadas:\r\n if ofertada['id'] == dados['id']:\r\n return jsonify({'erro':'id ja utilizada'}),400\r\n ofertadas.append(dados)\r\n return jsonify(),200\r\n else:\r\n return jsonify({'erro':'data faltando'}),400\r\n@app.route('/ofertadas/',methods=['GET'])\r\ndef retorna_id_disp(id):\r\n for ofertada in ofertadas:\r\n if ofertada['id'] == id:\r\n return jsonify( ofertada)\r\n return jsonify({'erro':'ofertada nao encontrada'}),400 \r\n@app.route('/ofertadas/',methods=['DELETE'])\r\ndef deleta_ofertada(id):\r\n for index,ofertada in enumerate(ofertadas):\r\n if ofertada['id'] == id:\r\n del ofertadas[index]\r\n return jsonify(),200\r\n return jsonify({'erro':'ofertada nao encontrada'}),400 \r\n@app.route('/ofertadas/',methods=['PUT'])\r\ndef edita_ofertada(id):\r\n dados = request.json\r\n for ofertada in ofertadas:\r\n if ofertada['id'] == id:\r\n if 'ano' in dados.keys():\r\n ofertada['ano'] =dados['ano']\r\n if 'semestre' in dados.keys():\r\n ofertada['semestre'] = dados['semestre']\r\n if 'turma' in dados.keys():\r\n ofertada['turma'] = dados['turma']\r\n if 'data' in dados.keys():\r\n ofertada['data'] = dados['data']\r\n if 'id_professor' in dados.keys():\r\n ofertada['id_professor']=dados['id_professor']\r\n return jsonify(),200\r\n return jsonify({'erro':'ofertada nao encontrada'}),400\r\n\r\n \r\nif __name__ == '__main__':\r\n app.run(port=5002,debug=True,host='localhost')\r\n", "repo_name": "gabrielmonzato20/ac6_distr", "sub_path": "ac5.py", "file_name": "ac5.py", "file_ext": "py", "file_size_in_byte": 7260, "program_lang": "python", "lang": "pt", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "73", "api": [{"api_name": "flask.Flask", "line_number": 3, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 10, "usage_type": "call"}, {"api_name": "flask.request.json", 
"line_number": 13, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 13, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 15, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 18, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 20, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 20, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 21, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 27, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 28, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 35, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 42, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 45, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 45, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 47, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 51, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 53, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 56, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 59, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 59, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 63, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 65, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 67, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 72, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 73, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 81, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 84, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 84, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 89, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 90, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 92, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 95, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 98, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 98, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 100, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 103, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 107, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 112, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 113, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 120, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 123, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 123, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 135, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 138, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 141, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 141, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 150, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 155, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 157, "usage_type": "call"}, {"api_name": "flask.jsonify", 
"line_number": 159, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 164, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 165, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 171, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 172, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 175, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 175, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 188, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 189, "usage_type": "call"}]}
+{"seq_id": "73001022315", "text": "'''\nCreated on Sep 23, 2013\n\n@author: mmartin\n'''\n\nfrom base64 import (\n b64encode\n)\n\nimport json\nimport requests\n\nfrom mezeo_cdmi.objectid import ObjectId\nfrom mezeo_dds.access import (\n decodeKey,\n)\n\n\nCDMICONTAINER = 'application/cdmi-container'\nCDMIOBJECT = 'application/cdmi-object'\nCHILDREN = 'children'\nCOUNT = 10\nOBJECTTYPE = 'objectType'\nROOT = '/cdmi'\nSTORAGEROOT = '/cdmi/storage_root'\nSYSTEMDOMAIN = 'system_domain/'\nTOPLEVELDOMAIN = '/cdmi/cdmi_domains'\nVERSION = '1.0.0'\n\n\nclass CDMIExports(object):\n '''\n Return a list of all exported containers\n '''\n\n def __init__(self, host, userid, passwd, verify, debug=False):\n '''\n Initialize this object\n '''\n self.debug = debug\n self.domains = []\n self.exports = []\n self.headers = {'X-CDMI-Specification-Version': '1.0.1'}\n self.storage_root = None\n self.verify = verify\n # prepend url with http if it's not already there\n if (host.startswith('http:') or\n host.startswith('https:')):\n self.host = host\n else:\n self.host = 'http://%s' % host\n\n self.auth_basic = \"Basic %s\" % b64encode(\"%s:%s\" %\n (userid, passwd))\n\n self.headers['Authorization'] = self.auth_basic\n\n def get_DDS_key(self, objectid):\n '''\n Convert a CDMI object id into a CSP object id\n '''\n return decodeKey(ObjectId.decode(objectid))\n\n def _exports(self, path, returnCSP=False):\n domain = self.GET(path, self.headers)\n if CHILDREN not in domain:\n return\n if not domain['objectName'].endswith('/'):\n domain['objectName'] = '%s/' % domain['objectName']\n children = domain[CHILDREN]\n for child in children:\n child_uri = '%s%s' % (path, child)\n data = self.GET(child_uri, self.headers)\n if not data:\n return\n if not data['objectName'].endswith('/'):\n data['objectName'] = '%s/' % data['objectName']\n if 'exports' not in data:\n if CHILDREN in data:\n for gchild in data[CHILDREN]:\n self._exports('%s%s' % (child_uri, gchild),\n returnCSP=returnCSP)\n continue\n if returnCSP:\n key = self.get_DDS_key(data['objectID'])\n self.exports.append(b64encode(key))\n else:\n self.exports.append(child_uri)\n\n def get_exports(self, path, returnCSP=False):\n self.exports = []\n self._exports(path, returnCSP=returnCSP)\n return self.exports\n\n def GET(self, path, headers=None):\n '''\n Get data from CDMI\n '''\n if not headers:\n headers = self.headers\n url = '%s%s' % (self.host, path)\n res = requests.get(url=url,\n allow_redirects=True,\n headers=headers,\n verify=self.verify)\n if res.status_code in [200]:\n return json.loads(res.text)\n else:\n print ('Could not connect to server. 
Response status %d'\n % res.status_code)\n\n def set_header(self, name, value):\n self.headers[name] = value\n\n def set_headers(self, headers):\n self.headers = headers\n if 'Authorization' not in self.headers:\n self.headers['Authorization'] = self.auth_basic\n", "repo_name": "building39/meztools", "sub_path": "CDMIExports.py", "file_name": "CDMIExports.py", "file_ext": "py", "file_size_in_byte": 3502, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "73", "api": [{"api_name": "base64.b64encode", "line_number": 54, "usage_type": "call"}, {"api_name": "mezeo_dds.access.decodeKey", "line_number": 63, "usage_type": "call"}, {"api_name": "mezeo_cdmi.objectid.ObjectId.decode", "line_number": 63, "usage_type": "call"}, {"api_name": "mezeo_cdmi.objectid.ObjectId", "line_number": 63, "usage_type": "name"}, {"api_name": "base64.b64encode", "line_number": 87, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 103, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 108, "usage_type": "call"}]}
+{"seq_id": "13289164217", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport datetime\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('apuser', '0006_alterpriceuser_user_type'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='alterpriceuser',\n name='created',\n field=models.DateTimeField(verbose_name='Дата регистрации', default=datetime.datetime(2015, 6, 4, 11, 43, 14, 207197), auto_now_add=True),\n preserve_default=False,\n ),\n ]\n", "repo_name": "BlackPie/alterprice", "sub_path": "src/apps/apuser/migrations/0007_alterpriceuser_created.py", "file_name": "0007_alterpriceuser_created.py", "file_ext": "py", "file_size_in_byte": 581, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "73", "api": [{"api_name": "django.db.migrations.Migration", "line_number": 8, "usage_type": "attribute"}, {"api_name": "django.db.migrations", "line_number": 8, "usage_type": "name"}, {"api_name": "django.db.migrations.AddField", "line_number": 15, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 15, "usage_type": "name"}, {"api_name": "django.db.models.DateTimeField", "line_number": 18, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 18, "usage_type": "name"}, {"api_name": "datetime.datetime", "line_number": 18, "usage_type": "call"}]}
+{"seq_id": "40257974414", "text": "import streamlit as st \nfrom multiapp import MultiApp\nfrom apps import habits, home, chatbot, song, habits_log\n\ndef local_css():\n with open(\"style.css\") as f:\n st.markdown(f'', unsafe_allow_html=True)\n\nlocal_css()\n\napp = MultiApp()\napp.add_app(\"homepage\", home.app)\napp.add_app(\"dashboard\", habits.app)\napp.add_app(\"habits log\", habits_log.app)\napp.add_app(\"chat with a friend!\", chatbot.app)\n\n\napp.run()", "repo_name": "mhl343/AI-Hackathon", "sub_path": "streamlit-dash/dashboard.py", "file_name": "dashboard.py", "file_ext": "py", "file_size_in_byte": 439, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "73", "api": [{"api_name": "streamlit.markdown", "line_number": 7, "usage_type": "call"}, {"api_name": "multiapp.MultiApp", "line_number": 11, "usage_type": "call"}, {"api_name": "apps.home.app", "line_number": 12, "usage_type": "attribute"}, {"api_name": "apps.home", "line_number": 12, "usage_type": "name"}, {"api_name": "apps.habits.app", "line_number": 13, "usage_type": "attribute"}, {"api_name": "apps.habits", "line_number": 13, "usage_type": "name"}, {"api_name": "apps.habits_log.app", "line_number": 14, "usage_type": "attribute"}, {"api_name": "apps.habits_log", "line_number": 14, "usage_type": "name"}, {"api_name": "apps.chatbot.app", "line_number": 15, "usage_type": "attribute"}, {"api_name": "apps.chatbot", "line_number": 15, "usage_type": "name"}]}
+{"seq_id": "43262502666", "text": "#%%\n\nimport os\n\nos.makedirs(os.path.join('..', 'data'), exist_ok=True)\ndata_file = os.path.join('..', 'data', 'house_tiny.csv')\nwith open(data_file, 'w') as f:\n f.write('NumRooms,Alley,Price\\n') # 列名\n f.write('NA,Pave,127500\\n') # 每行表示一个数据样本\n f.write('2,NA,106000\\n')\n f.write('4,NA,178100\\n')\n f.write('NA,NA,140000\\n')\n\n#%%\n\n# 如果没有安装pandas,只需取消对以下行的注释来安装pandas\n# !pip install pandas\nimport pandas as pd\n\ndata = pd.read_csv(data_file)\nprint(data)\n\n#%%\n\ninputs, outputs = data.iloc[:, 0:2], data.iloc[:, 2]\n\n#%%\n\ninputs,outputs\n\n#%%\n\nc=inputs.iloc[:,0:1]\nc\n\n#%%\n\nc=c.fillna(c.mean())\nc\n\n#%%\n\ninputs.iloc[:,0:1]=c\n\n#%%\n\ninputs\n\n#%%\n\nc=inputs.iloc[:,0]\n\n#%%\n\nc\n\n#%%\n\nc.mean()\ninputs.iloc[:,0]=c.fillna(c.mean())\n\n#%%\n\ninputs.iloc[:,0]\n\n#%%\n\ninputs\n\n#%%\n\ninputs=pd.get_dummies(inputs,dummy_na=True)*1\ninputs\n\n#%%\n\n\n\n#%%\n\nimport torch\n\nX, y = torch.tensor(inputs.values), torch.tensor(outputs.values)\nX, y\n\n#%%\n\n\n", "repo_name": "mozhumz/machine_learning_py", "sub_path": "com/hyj/nlp/dnn_study/data_pre_handle.py", "file_name": "data_pre_handle.py", "file_ext": "py", "file_size_in_byte": 992, "program_lang": "python", "lang": "zh", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "73", "api": [{"api_name": "os.makedirs", "line_number": 5, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 5, "usage_type": "call"}, {"api_name": "os.path", "line_number": 5, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 6, "usage_type": "call"}, {"api_name": "os.path", "line_number": 6, "usage_type": "attribute"}, {"api_name": "pandas.read_csv", "line_number": 20, "usage_type": "call"}, {"api_name": "pandas.get_dummies", "line_number": 72, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 83, "usage_type": "call"}]}
+{"seq_id": "35861120999", "text": "# -*- coding: utf-8 -*-\n\nfrom lib import Logger\nfrom BaseSinker import BaseSinker\nfrom lib import HtmlFactory\nfrom lib import MailSender\n\n\nclass MultiSectionEmailSinker(BaseSinker):\n def __init__(self, tag, address, title, contexts):\n BaseSinker.__init__(self)\n self.name = \"MultiSectionEmailSinker\"\n self.tag = tag\n self.contexts = contexts\n self.address = address\n self.title = title\n\n def sink(self, sections):\n if sections is None or len(sections) == 0:\n Logger.e(self.id() + \"no data, abort\")\n return\n content = \"\"\n for context in self.contexts:\n key = context[\"id\"]\n section = sections[key]\n content += \"
\"\n content += HtmlFactory.getTable(context[\"title\"], context['cols'].split(','), section)\n\n content = HtmlFactory.getHeader(content)\n\n title = \"[%s]\" % self.title\n mail_to = [self.address]\n #MailSender.sendMail2(mail_to, title, content)\n MailSender.sendMail(mail_to, title, content)\n\nif __name__ == \"__main__\":\n testTag = \"test\"\n testTitle = \"test email sinker\"\n testContexts = [ {\"id\":\"s1\", \"title\": \"section 1\", \"cols\": \"col1, col2\"}, {\"id\":\"s2\", \"title\": \"section 2\", \"cols\": \"colA, colB\"} ]\n testAddress = \"ulyx.yang@ndpmedia.com\"\n testSinker = MultiSectionEmailSinker(testTag, testAddress, testTitle, testContexts)\n testData = { \"s1\":[[\"123\", \"456\"], [\"789\", \"100\"]], \"s2\":[[\"1123\", \"4156\"], [\"7189\", \"1100\"]], }\n testSinker.sink(testData)\n\n\n", "repo_name": "yangxu02/datax", "sub_path": "MultiSectionEmailSinker.py", "file_name": "MultiSectionEmailSinker.py", "file_ext": "py", "file_size_in_byte": 1552, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "73", "api": [{"api_name": "BaseSinker.BaseSinker", "line_number": 9, "usage_type": "name"}, {"api_name": "BaseSinker.BaseSinker.__init__", "line_number": 11, "usage_type": "call"}, {"api_name": "BaseSinker.BaseSinker", "line_number": 11, "usage_type": "name"}, {"api_name": "lib.Logger.e", "line_number": 20, "usage_type": "call"}, {"api_name": "lib.Logger", "line_number": 20, "usage_type": "name"}, {"api_name": "lib.HtmlFactory.getTable", "line_number": 27, "usage_type": "call"}, {"api_name": "lib.HtmlFactory", "line_number": 27, "usage_type": "name"}, {"api_name": "lib.HtmlFactory.getHeader", "line_number": 29, "usage_type": "call"}, {"api_name": "lib.HtmlFactory", "line_number": 29, "usage_type": "name"}, {"api_name": "lib.MailSender.sendMail", "line_number": 34, "usage_type": "call"}, {"api_name": "lib.MailSender", "line_number": 34, "usage_type": "name"}]}
+{"seq_id": "37741742882", "text": "import socket\r\nimport threading\r\nimport multiprocessing\r\nimport time\r\n\r\nserveraddr1 = ('127.0.0.1', 8080)#定义server的ip和地址\r\nserveraddr2 = ('127.0.0.1', 8081)#定义server的ip和地址\r\n\r\ndef client_thread(client,port):#客户端建立socket\r\n client.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\r\n client.bind(('127.0.0.1', port))\r\n client.listen(1)\r\n while(True):\r\n clientsocket, address = client.accept()\r\n print(clientsocket.recv(1024).decode('utf-8'))\r\n\r\ndef client_connect(serveraddr,client_name,input_text):\r\n #login指令\r\n target = socket.socket(socket.AF_INET,socket.SOCK_STREAM)\r\n target.connect(serveraddr)\r\n id = client_name #input('请输入你的用户名: ')\r\n target.send(('none login '+id).encode('utf-8'))\r\n port = int(target.recv(1024).decode('utf-8'))\r\n print('链接成功')\r\n target.close()\r\n #启动客户端\r\n client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n threading.Thread(target=client_thread, args=(client,port)).start()\r\n #开始发送指令\r\n while(True):\r\n time.sleep(1)\r\n input_text=input_method()\r\n put = id+' '+ input_text #input()\r\n target = socket.socket(socket.AF_INET,socket.SOCK_STREAM)\r\n target.connect(serveraddr)\r\n target.send(put.encode('utf-8'))\r\n callback = target.recv(1024).decode('utf-8')\r\n if(callback!='success'):\r\n print(callback)\r\n target.close()\r\n if put.split(' ')[1] == 'close':#关闭客户端\r\n break\r\n client.close()\r\n\r\ndef input_method():\r\n input_text = input()\r\n return input_text\r\n\r\ndef main():\r\n input_text=input_method()\r\n p1 = multiprocessing.Process(target=client_connect,args=(serveraddr1,\"daniel\",input_text))\r\n p1.start()\r\n #p2 = multiprocessing.Process(target=client_connect,args=(serveraddr2,\"daniel\"))\r\n #p2.start()\r\n\r\nif __name__ == \"__main__\":\r\n main()", "repo_name": "Daniel-Why/daylifecode", "sub_path": "TWOIM/test/client1.py", "file_name": "client1.py", "file_ext": "py", "file_size_in_byte": 1941, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "73", "api": [{"api_name": "socket.SOL_SOCKET", "line_number": 10, "usage_type": "attribute"}, {"api_name": "socket.SO_REUSEADDR", "line_number": 10, "usage_type": "attribute"}, {"api_name": "socket.socket", "line_number": 19, "usage_type": "call"}, {"api_name": "socket.AF_INET", "line_number": 19, "usage_type": "attribute"}, {"api_name": "socket.SOCK_STREAM", "line_number": 19, "usage_type": "attribute"}, {"api_name": "socket.socket", "line_number": 27, "usage_type": "call"}, {"api_name": "socket.AF_INET", "line_number": 27, "usage_type": "attribute"}, {"api_name": "socket.SOCK_STREAM", "line_number": 27, "usage_type": "attribute"}, {"api_name": "threading.Thread", "line_number": 28, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 31, "usage_type": "call"}, {"api_name": "socket.socket", "line_number": 34, "usage_type": "call"}, {"api_name": "socket.AF_INET", "line_number": 34, "usage_type": "attribute"}, {"api_name": "socket.SOCK_STREAM", "line_number": 34, "usage_type": "attribute"}, {"api_name": "multiprocessing.Process", "line_number": 51, "usage_type": "call"}]}
+{"seq_id": "5533778387", "text": "import os\nimport sys\n\nimport psycopg2\n\nif len(sys.argv) not in (3 , 4):\n\traise Exception(\"Must pass input filename and target table name as arguments\")\n\nif len(sys.argv) == 3:\n\t_, infile_name, target_table = sys.argv\n\tcolumns = None\nelse:\n\t_, infile_name, target_table, columns = sys.argv\n\tcolumns = columns.split(',')\n\n\nwith open(infile_name, 'r') as data:\n\twith psycopg2.connect(os.environ['DB_URI']) as conn:\n\n\t\tcopy_params = {\n\t\t\t'file': data,\n\t\t\t'table': target_table,\n\t\t\t'sep': '|',\n\t\t}\n\t\tif columns:\n\t\t\tcopy_params.update({'columns': columns})\n\n\t\tcur = conn.cursor()\n\t\tcur.copy_from(**copy_params)\n\t\tconn.commit()\n", "repo_name": "davidshere/irs-aws-990s", "sub_path": "etl/write_to_db.py", "file_name": "write_to_db.py", "file_ext": "py", "file_size_in_byte": 621, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "73", "api": [{"api_name": "sys.argv", "line_number": 6, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 9, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 10, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 13, "usage_type": "attribute"}, {"api_name": "psycopg2.connect", "line_number": 18, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 18, "usage_type": "attribute"}]}
+{"seq_id": "6123109049", "text": "import pandas as pd\nimport math\nimport matplotlib.pyplot as mp\nimport matplotlib.dates as dates\nimport numpy as np\n\n# Use PYVENV in Development\n\n# Also read dist files. \n\nmocap_filename = \"mocap_valentijn/beach_repr_2b_velocity_M.tsv\"\n\n'''\n(PYVENV) pberck@ip30-163 MoCap % head beach_repr_2b_velocity_M.tsv\nNO_OF_FRAMES\t20887\nNO_OF_DATA_TYPES\t28\nFREQUENCY\t200\nTIME_STAMP\t2022-11-22, 21:34:11\nDATA_INCLUDED\tVelocity\nDATA_TYPES\tx_LWristOut_vel_M\tX_LWristIn_vel_M\tx_LHandOut_vel_M\tx_LHandIn_vel_M\tx_RWristOut_vel_M\tx_RWristIn_vel_M\tx_RHandOut_vel_M\tx_RHandIn_vel_M\tx_RThumb1_vel_M\tx_RThumbTip_vel_M\tx_RIndex2_vel_M\tx_RIndexTip_vel_M\tx_RMiddle2_vel_M\tx_RMiddleTip_vel_M\tx_RRing2_vel_M\tx_RRingTip_vel_M\tx_RPinky2_vel_M\tx_RPinkyTip_vel_M\tx_LThumb1_vel_M\tx_LThumbTip_vel_M\tx_LIndex2_vel_M\tx_LIndexTip_vel_M\tx_LMiddle2_vel_M\tx_LMiddleTip_vel_M\tx_LRing2_vel_M\tx_LRingTip_vel_M\tx_LPinky2_vel_M\tx_LPinkyTip_vel_M\n\n\n1\t0.00000\t0.000\t0.000\t0.000\t0.000\t0.000\t0.000\t0.000\t0.000\t0.000\t0.000\t0.000\t0.000\t0.000\t0.000\t0.000\t0.000\t0.000\t0.000\t0.000\t0.000\t0.000\t0.000\t0.000\t0.000\t0.000\t0.000\t0.000\t0.000\n2\t0.00500\t12.438\t14.483\t73.739\t109.118\t12.927\t11.315\t11.895\t12.532\t14.295\t14.165\t73.827\t265.273\t191.557\t22.159\t16.281\t16.797\t19.674\t26.238\t161.599\t184.912\t109.435\t119.367\t142.256\t161.760\t164.861\t208.077\t124.859\t144.613\n'''\n\n# Data is index plus timestamp plus 64*3 data points?\n\ndf = None\ndf_rows = []\nlnum = 0\nfreq = 200 # available in file header\nwith open(mocap_filename, \"r\") as f:\n for line in f:\n bits = line.split()\n #print( lnum, len(bits) )\n if len(bits) > 1:\n if bits[0] == \"FREQUENCY\":\n freq = int(bits[1])\n if bits[0] == \"DATA_TYPES\":\n column_names = bits # We add a Timestamp later to this one too\n print( column_names )\n if len(bits) > 15 and lnum > 7:\n bits = [ float(x) for x in bits ]\n df_rows.append( bits[1:] ) #skip index number\n lnum += 1\n\n#for x in column_names:\n# print( x )\n# check for \"finger movement only\", \"hand movement\", \"arm movement\" (not in this data, use distances?)\n'''\nx_LWristOut_vel_M\nx_LWristIn_vel_M\n\nx_LHandOut_vel_M\nx_LHandIn_vel_M\n\nx_LThumb1_vel_M\nx_LThumbTip_vel_M\n\nx_LIndex2_vel_M\nx_LIndexTip_vel_M\n\nx_LMiddle2_vel_M\nx_LMiddleTip_vel_M\n\nx_LRing2_vel_M\nx_LRingTip_vel_M\n\nx_LPinky2_vel_M\nx_LPinkyTip_vel_M\n\n# ---\n\nx_RWristOut_vel_M\nx_RWristIn_vel_M\n\nx_RHandOut_vel_M\nx_RHandIn_vel_M\n\nx_RThumb1_vel_M\nx_RThumbTip_vel_M\n\nx_RIndex2_vel_M\nx_RIndexTip_vel_M\n\nx_RMiddle2_vel_M\nx_RMiddleTip_vel_M\n\nx_RRing2_vel_M\nx_RRingTip_vel_M\n\nx_RPinky2_vel_M\nx_RPinkyTip_vel_M\n'''\n\ngroup_LHand = [\"x_LWristOut_vel_M\", \"x_LWristIn_vel_M\", \"x_LHandOut_vel_M\", \"x_LHandIn_vel_M\"]\ngroup_LFingers = [\"x_LThumb1_vel_M\", \"x_LThumbTip_vel_M\", \"x_LIndex2_vel_M\", \"x_LIndexTip_vel_M\",\n \"x_LMiddle2_vel_M\", \"x_LMiddleTip_vel_M\", \"x_LRing2_vel_M\", \"x_LRingTip_vel_M\",\n \"x_LPinky2_vel_M\", \"x_LPinkyTip_vel_M\"]\n\ngroup_RHand = [\"x_RWristOut_vel_M\", \"x_RWristIn_vel_M\", \"x_RHandOut_vel_M\", \"x_RHandIn_vel_M\"]\ngroup_RFingers = [\"x_RThumb1_vel_M\", \"x_RThumbTip_vel_M\", \"x_RIndex2_vel_M\", \"x_RIndexTip_vel_M\",\n \"x_RMiddle2_vel_M\", \"x_RMiddleTip_vel_M\", \"x_RRing2_vel_M\", \"x_RRingTip_vel_M\",\n \"x_RPinky2_vel_M\", \"x_RPinkyTip_vel_M\"]\n\n\ncolumn_names[0] = \"Timestamp\"\ndf = pd.DataFrame(df_rows, columns = column_names)\n#df['Time'] = pd.to_datetime(df['Timestamp']) 
# not used\ndf['x_LWristOut_vel_M_T'] = np.where( df[\"x_LWristOut_vel_M\"] > 240, 240, 0 )\nprint( df )\n\n# plot, with \"x=0\" interesting plot\ndf.plot(\n x=0, #df[\"Time\"],\n #y=[1,2,3,4,5,6,7,8,9,10],\n y=[1,2, 5,6], #[\"x_LWristOut_vel_M\"],\n kind=\"line\",\n figsize=(16, 8)\n)\n\n# ----------------------------\n\n# Read the dist data\ndf_dists = pd.read_csv(\"beach_repr_2b_dists.tsv\", sep=\"\\t\")\nprint( df_dists )\n\n#print( \",\".join(sorted(df_dists.columns)) )\n'''\nx_BackL,x_BackR,x_Chest,x_HeadFront,x_HeadL,x_HeadR,x_HeadTop,\n\nx_LAnkleOut,x_LArm,x_LElbowOut,x_LForefootIn,x_LForefootOut,x_LHandIn,x_LHandOut,x_LHeelBack,x_LIndex2,x_LIndexTip,x_LKneeOut,x_LMiddle2,x_LMiddleTip,x_LPinky2,x_LPinkyTip,x_LRing2,x_LRingTip,x_LShin,x_LShoulderBack,x_LShoulderTop,x_LThigh,x_LThumb1,x_LThumbTip,x_LToeTip,x_LWristIn,x_LWristOut,\n\nx_RAnkleOut,x_RArm,x_RElbowOut,x_RForefootIn,x_RForefootOut,x_RHandIn,x_RHandOut,x_RHeelBack,x_RIndex2,x_RIndexTip,x_RKneeOut,x_RMiddle2,x_RMiddleTip,x_RPinky2,x_RPinkyTip,x_RRing2,x_RRingTip,x_RShin,x_RShoulderBack,x_RShoulderTop,x_RThigh,x_RThumb1,x_RThumbTip,x_RToeTip,x_RWristIn,x_RWristOut,\n\nx_SpineTop,x_WaistLBack,x_WaistLFront,x_WaistRBack,x_WaistRFront\n'''\n\n# Create a dataframe with \"distance moved across threshold\" indicators.\ndf_dists_t = pd.DataFrame()\ndf_dists_t[\"Timestamp\"] = df_dists[\"Timestamp\"]\n\nfor sensor in [\"x_LArm\", \"x_LElbowOut\", \"x_LHandIn\",\"x_LHandOut\", \"x_LShoulderBack\",\"x_LShoulderTop\",\n \"x_LThigh\",\"x_LThumb1\",\"x_LThumbTip\",\"x_LToeTip\",\"x_LWristIn\",\"x_LWristOut\"]:\n df_dists_t[sensor+'_T'] = np.where( df_dists[sensor] > 1, 3, 0 )\n\nprint( df_dists_t )\n\nfig, axes = mp.subplots(nrows=2, ncols=1, figsize=(12,6), sharex=True, sharey=True)\nfig.suptitle( \"distances\" )\n\ncol = np.where(df_dists_t[\"x_LElbowOut_T\"] > 1, 'r', 'b')\nsiz = np.where(df_dists_t[\"x_LElbowOut_T\"] > 1, 1, 0)\n\naxes[0].plot(\n df_dists[\"Timestamp\"].values,\n df_dists[\"x_LElbowOut\"].values\n)\naxes[0].set_title(\"x_LElbowOut and marker\")\naxes[0].scatter(\n df_dists[\"Timestamp\"].values,\n df_dists_t[\"x_LElbowOut_T\"].values,\n s=siz, c=col\n)\naxes[1].set_title(\"x_RElbowOut\")\naxes[1].plot(\n df_dists[\"Timestamp\"].values,\n df_dists[\"x_RElbowOut\"].values\n)\n\n\ndf_dists.plot(\n x=\"Timestamp\",\n #y=[1,2,3,4,5,6,7,8,9,10],\n y=[\"x_LArm\", \"x_RArm\", \"x_LHandOut\", \"x_RHandOut\"],\n kind=\"line\",\n figsize=(16, 8)\n)\n#axes[0].scatter(\n# \"Timestamp\",\n# \"x_LWristOut_vel_M_T\",\n# marker='o',\n# s=siz, c=col, #\"red\",\n# data=df,\n# label=\"\"\n#)\n\n# ----------------------------\n\n# Plot\nfig, axes = mp.subplots(nrows=2, ncols=1, figsize=(16,8), sharex=True, sharey=True)\n\n''' see mocap_vel00.py\naxes[0].plot(\n \"Timestamp\",\n \"x_LWristOut_vel_M\",\n data=df,\n label=\"LW\"\n)\ncol = np.where(df[\"x_LWristOut_vel_M_T\"]<200,'b', 'r')\nsiz = np.where(df[\"x_LWristOut_vel_M_T\"]<200,0,1)\naxes[0].scatter(\n \"Timestamp\",\n \"x_LWristOut_vel_M_T\",\n marker='o',\n s=siz, c=col, #\"red\",\n data=df,\n label=\"\"\n)\naxes[0].legend(loc=\"upper right\")\n'''\n\nfor sensor in [\"x_RThumb1_vel_M\", \"x_RThumbTip_vel_M\", \"x_RIndex2_vel_M\", \"x_RIndexTip_vel_M\", \"x_RMiddle2_vel_M\", \"x_RMiddleTip_vel_M\", \"x_RRing2_vel_M\", \"x_RRingTip_vel_M\", \"x_RPinky2_vel_M\", \"x_RPinkyTip_vel_M\"]:\n axes[0].plot(\n \"Timestamp\",\n sensor,\n data=df\n )\n#axes[1].legend(loc=\"upper right\")\nbox = axes[0].get_position()\naxes[0].set_position([box.x0, box.y0 + box.height * 0.12, box.width, 
box.height * 0.88])\naxes[0].legend(loc='upper center', bbox_to_anchor=(0.5, -0.1), ncol=6)\n\n#axes[1].plot(\n# \"Timestamp\",\n# \"x_LHandOut_vel_M\",\n# data=df\n#)\nfor sensor in [\"x_LThumb1_vel_M\", \"x_LThumbTip_vel_M\", \"x_LIndex2_vel_M\", \"x_LIndexTip_vel_M\", \"x_LMiddle2_vel_M\", \"x_LMiddleTip_vel_M\", \"x_LRing2_vel_M\", \"x_LRingTip_vel_M\", \"x_LPinky2_vel_M\", \"x_LPinkyTip_vel_M\"]:\n axes[1].plot(\n \"Timestamp\",\n sensor,\n data=df\n )\n#axes[1].legend(loc=\"upper right\")\nbox = axes[1].get_position()\naxes[1].set_position([box.x0, box.y0 + box.height * 0.12, box.width, box.height * 0.88])\naxes[1].legend(loc='upper center', bbox_to_anchor=(0.5, -0.1), ncol=6)\n\n# Use \"np.condition\" to determine hand/finger/arm movements? (1/0 columns)\n\nmp.show()\n'''\n# For creating new column with multiple conditions\nconditions = [\n (df['Base Column 1'] == 'A') & (df['Base Column 2'] == 'B'),\n (df['Base Column 3'] == 'C')]\nchoices = ['Conditional Value 1', 'Conditional Value 2']\ndf['New Column'] = np.select(conditions, choices, default='Conditional Value 1')\n\nsiz = np.where(df[\"x_LWristOut_vel_M_T\"]<200,0,1)\n\nconditions = [\n df['gender'].eq('male') & df['pet1'].eq(df['pet2']),\n df['gender'].eq('female') & df['pet1'].isin(['cat', 'dog'])\n]\nchoices = [5,5]\ndf['points'] = np.select(conditions, choices, default=0)\nprint(df)\n'''\n", "repo_name": "pberck/MoCap", "sub_path": "mocap_cmb_00.py", "file_name": "mocap_cmb_00.py", "file_ext": "py", "file_size_in_byte": 8251, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "73", "api": [{"api_name": "pandas.DataFrame", "line_number": 109, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 111, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 126, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 141, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 146, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 150, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 150, "usage_type": "name"}, {"api_name": "numpy.where", "line_number": 153, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 154, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 192, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 192, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 243, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 243, "usage_type": "name"}]}
+{"seq_id": "2444760010", "text": "from django.urls import path, include\nfrom rest_framework import routers\nfrom .views import ClientView, MailView, MessagesByMailView\n\n\nrouter = routers.DefaultRouter()\nrouter.register('client', ClientView)\nrouter.register('mail', MailView)\nrouter.register('mailMessages', MessagesByMailView)\n\nurlpatterns = [\n path('', include(router.urls)),\n]", "repo_name": "Sniperat/Notification_service", "sub_path": "main/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 346, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "73", "api": [{"api_name": "rest_framework.routers.DefaultRouter", "line_number": 6, "usage_type": "call"}, {"api_name": "rest_framework.routers", "line_number": 6, "usage_type": "name"}, {"api_name": "views.ClientView", "line_number": 7, "usage_type": "argument"}, {"api_name": "views.MailView", "line_number": 8, "usage_type": "argument"}, {"api_name": "views.MessagesByMailView", "line_number": 9, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 12, "usage_type": "call"}, {"api_name": "django.urls.include", "line_number": 12, "usage_type": "call"}]}
+{"seq_id": "11118110791", "text": "import numpy as np\nimport pandas as pd\n\nfrom torch.utils import data\n\nfrom sklearn import preprocessing\n\n\nclass JASMIN(data.Dataset):\n def __init__(\n self,\n root: str,\n split: str,\n x_vars: list = None,\n t_var: str = \"tot_aod\",\n y_vars: list = None,\n t_bins: int = 2,\n ) -> None:\n super(JASMIN, self).__init__()\n # Handle default values\n if x_vars is None:\n x_vars = [\n \"RH900\",\n \"RH850\",\n \"RH700\",\n \"LTS\",\n \"EIS\",\n \"w500\",\n \"whoi_sst\",\n ]\n if y_vars is None:\n y_vars = [\"l_re\", \"liq_pc\", \"cod\", \"cwp\"]\n # Read csv\n df = pd.read_csv(root, index_col=0)\n # Filter AOD and Precip values\n df = df[df.tot_aod.between(0.07, 1.0)]\n df = df[df.precip < 0.5]\n # Make train test valid split\n days = df[\"timestamp\"].unique()\n days_valid = set(days[5::7])\n days_test = set(days[6::7])\n days_train = set(days).difference(days_valid.union(days_test))\n # Fit preprocessing transforms\n df_train = df[df[\"timestamp\"].isin(days_train)]\n self.data_xfm = preprocessing.StandardScaler()\n self.data_xfm.fit(df_train[x_vars].to_numpy())\n self.treatments_xfm = preprocessing.KBinsDiscretizer(\n n_bins=t_bins, encode=\"onehot-dense\"\n )\n self.treatments_xfm.fit(df_train[t_var].to_numpy().reshape(-1, 1))\n self.targets_xfm = preprocessing.StandardScaler()\n self.targets_xfm.fit(df_train[y_vars].to_numpy())\n # Split the data\n if split == \"train\":\n _df = df[df[\"timestamp\"].isin(days_train)]\n elif split == \"valid\":\n _df = df[df[\"timestamp\"].isin(days_valid)]\n elif split == \"test\":\n _df = df[df[\"timestamp\"].isin(days_test)]\n # Set variables\n self.data = self.data_xfm.transform(_df[x_vars].to_numpy(dtype=\"float32\"))\n self.treatments = self.treatments_xfm.transform(\n _df[t_var].to_numpy(dtype=\"float32\").reshape(-1, 1)\n )[:, 1:]\n self.targets = self.targets_xfm.transform(_df[y_vars].to_numpy(dtype=\"float32\"))\n # Variable properties\n self.dim_input = self.data.shape[-1]\n self.dim_targets = self.targets.shape[-1]\n self.dim_treatments = t_bins - 1\n self.data_names = x_vars\n self.target_names = y_vars\n self.treatment_names = [t_var]\n\n @property\n def data_frame(self):\n data = np.hstack(\n [\n self.data_xfm.inverse_transform(self.data),\n self.treatments,\n self.targets_xfm.inverse_transform(self.targets),\n ],\n )\n return pd.DataFrame(\n data=data,\n columns=self.data_names + self.treatment_names + self.target_names,\n )\n\n def __len__(self) -> int:\n return len(self.targets)\n\n def __getitem__(self, index) -> data.dataset.T_co:\n return self.data[index], self.treatments[index], self.targets[index]\n", "repo_name": "OATML/clouds", "sub_path": "clouds/datasets/jasmin.py", "file_name": "jasmin.py", "file_ext": "py", "file_size_in_byte": 3135, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "73", "api": [{"api_name": "torch.utils.data.Dataset", "line_number": 9, "usage_type": "attribute"}, {"api_name": "torch.utils.data", "line_number": 9, "usage_type": "name"}, {"api_name": "pandas.read_csv", "line_number": 34, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.StandardScaler", "line_number": 45, "usage_type": "call"}, {"api_name": "sklearn.preprocessing", "line_number": 45, "usage_type": "name"}, {"api_name": "sklearn.preprocessing.KBinsDiscretizer", "line_number": 47, "usage_type": "call"}, {"api_name": "sklearn.preprocessing", "line_number": 47, "usage_type": "name"}, {"api_name": "sklearn.preprocessing.StandardScaler", 
"line_number": 51, "usage_type": "call"}, {"api_name": "sklearn.preprocessing", "line_number": 51, "usage_type": "name"}, {"api_name": "torch.utils.data", "line_number": 76, "usage_type": "name"}, {"api_name": "numpy.hstack", "line_number": 76, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 83, "usage_type": "call"}, {"api_name": "torch.utils.data", "line_number": 84, "usage_type": "name"}, {"api_name": "torch.utils.data.dataset", "line_number": 91, "usage_type": "attribute"}, {"api_name": "torch.utils.data", "line_number": 91, "usage_type": "name"}]}
+{"seq_id": "3850365787", "text": "import komand\nfrom .schema import RunInput, RunOutput\n\n# Custom imports below\nimport json\nimport base64\nimport requests\nfrom komand_try_bro.util import utils\n\n\nclass Run(komand.Action):\n def __init__(self):\n super(self.__class__, self).__init__(\n name=\"run\", description=\"Upload PCAP file\", input=RunInput(), output=RunOutput()\n )\n\n def run(self, params={}):\n server = self.connection.server\n pcap = base64.b64decode(params.get(\"pcap\"))\n scripts = params.get(\"scripts\")\n version = params.get(\"version\", \"master\")\n if pcap:\n checksum = utils.maybe_upload_pcap(server, pcap, self.logger)\n else:\n raise Exception(cause=\"Error: No PCAP supplied\")\n sources = utils.load_scripts(scripts, self.logger)\n self.logger.info(\"Run: Supplied Scripts: %s\", sources)\n req = {\"sources\": sources, \"version\": version, \"pcap\": checksum}\n data = json.dumps(req)\n headers = {\"Content-type\": \"application/json\"}\n res = requests.post(server + \"/run\", data=data, headers=headers).json()\n if res[\"stdout\"] != \"\":\n self.logger.info(res[\"stdout\"])\n return {\n \"id\": res[\"job\"],\n \"url\": \"{server}/#/trybro/saved/{job}\".format(server=server, job=res[\"job\"]),\n }\n\n def test(self):\n server = self.connection.server\n res = requests.get(server)\n if res.status_code != 200:\n raise Exception(cause=\"Test: Unsuccessful HTTP status code returned\")\n return {}\n", "repo_name": "rapid7/insightconnect-plugins", "sub_path": "plugins/try_bro/komand_try_bro/actions/run/action.py", "file_name": "action.py", "file_ext": "py", "file_size_in_byte": 1553, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 59, "dataset": "github-code", "pt": "73", "api": [{"api_name": "komand.Action", "line_number": 11, "usage_type": "attribute"}, {"api_name": "schema.RunInput", "line_number": 14, "usage_type": "call"}, {"api_name": "schema.RunOutput", "line_number": 14, "usage_type": "call"}, {"api_name": "base64.b64decode", "line_number": 19, "usage_type": "call"}, {"api_name": "komand_try_bro.util.utils.maybe_upload_pcap", "line_number": 23, "usage_type": "call"}, {"api_name": "komand_try_bro.util.utils", "line_number": 23, "usage_type": "name"}, {"api_name": "komand_try_bro.util.utils.load_scripts", "line_number": 26, "usage_type": "call"}, {"api_name": "komand_try_bro.util.utils", "line_number": 26, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 29, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 31, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 41, "usage_type": "call"}]}
+{"seq_id": "24137127133", "text": "from django.urls import path\nfrom . import views\n\nurlpatterns = [\n path('list', views.listfunc, name='list'),\n path('create', views.ScrapeCreate.as_view(), name='create'),\n path('update/', views.DiaryUpdate.as_view(), name='update'),\n path('analysis', views.analysis, name='analysis'),\n path('signup', views.signupfunc, name='signup'),\n path('login', views.loginfunc, name='login'),\n path('logout', views.logoutfunc, name='logout'),\n path('graph', views.graph, name='graph'),\n]", "repo_name": "shuya106/healthcare_webapp", "sub_path": "HEALTHCHECK/myhealth/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 509, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "73", "api": [{"api_name": "django.urls.path", "line_number": 5, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 6, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 7, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 8, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 9, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 10, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 11, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 12, "usage_type": "call"}]}
+{"seq_id": "39923045483", "text": "# ~~~ cnn.py ~~~\n# This file creates and trains the convolutional neural network \n\nimport dataset\nimport config\nfrom keras.models import Sequential, load_model\nfrom keras.layers import Dense, Flatten, Conv2D, MaxPooling2D\n\n# loading with pickle (doesn't really work)\n# X = pickle.load(open(\"X_features.pickle\", \"rb\"))\n# y = pickle.load(open(\"y_labels.pickle\", \"rb\"))\n\ndef train_model(num_epochs, v_ratio):\n # loading the data in\n # X is the features, y is the labels\n # NOTE: we don't have test data yet\n # (X_train, y_train, X_test, y_test) = dataset.load_dataset()\n try:\n (X_train, y_train) = dataset.load_dataset()\n except FileNotFoundError:\n print(f\"error: file(s) {config.TR_FEAT_FILENAME}, {config.TR_LABL_FILENAME} don't exist\")\n print(\"note: try generating them using option (1)\")\n return\n\n # This model has 3 layers with an input shape of 65x65x1 (1 since the images\n # are grayscale) and can have 28 possible categories\n model = Sequential()\n model.add(Conv2D(32, (3, 3), activation='relu', input_shape=(config.IMG_SIZE, config.IMG_SIZE, 1))) # 1 = grayscale\n model.add(MaxPooling2D((2, 2)))\n model.add(Conv2D(64, (3, 3), activation='relu'))\n model.add(MaxPooling2D((2, 2)))\n model.add(Flatten())\n model.add(Dense(28, activation='relu'))\n model.add(Dense(len(config.CATEGORIES), activation='softmax')) # Softmax, since multiclass\n model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])\n\n # Actually train and save the result of the model\n # Batch size seems to affect accuracy (>=256 results in lower accuracy)\n model.fit(X_train, y_train, epochs=num_epochs, batch_size=64, validation_split=v_ratio)\n model.save(config.MODEL_FILENAME)\n\ndef model_summary():\n try:\n model = load_model(config.MODEL_FILENAME)\n model.summary()\n except OSError:\n print(\"error: file \" + config.MODEL_FILENAME + \" not found\")\n print(\"note: try creating/training it using option (2)\")\n\n# Evaluate the model on the test set\n# accuracy = model.evaluate(X_test, y_test, verbose=0)[1]\n# print(f'Test Accuracy: {accuracy * 100:.2f}%')\n\n", "repo_name": "sama305/SignLanguageML", "sub_path": "cnn.py", "file_name": "cnn.py", "file_ext": "py", "file_size_in_byte": 2168, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "83", "api": [{"api_name": "dataset.load_dataset", "line_number": 19, "usage_type": "call"}, {"api_name": "config.TR_FEAT_FILENAME", "line_number": 21, "usage_type": "attribute"}, {"api_name": "config.TR_LABL_FILENAME", "line_number": 21, "usage_type": "attribute"}, {"api_name": "keras.models.Sequential", "line_number": 27, "usage_type": "call"}, {"api_name": "keras.layers.Conv2D", "line_number": 28, "usage_type": "call"}, {"api_name": "config.IMG_SIZE", "line_number": 28, "usage_type": "attribute"}, {"api_name": "keras.layers.MaxPooling2D", "line_number": 29, "usage_type": "call"}, {"api_name": "keras.layers.Conv2D", "line_number": 30, "usage_type": "call"}, {"api_name": "keras.layers.MaxPooling2D", "line_number": 31, "usage_type": "call"}, {"api_name": "keras.layers.Flatten", "line_number": 32, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 33, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 34, "usage_type": "call"}, {"api_name": "config.CATEGORIES", "line_number": 34, "usage_type": "attribute"}, {"api_name": "config.MODEL_FILENAME", "line_number": 40, "usage_type": "attribute"}, {"api_name": 
"keras.models.load_model", "line_number": 44, "usage_type": "call"}, {"api_name": "config.MODEL_FILENAME", "line_number": 44, "usage_type": "attribute"}, {"api_name": "config.MODEL_FILENAME", "line_number": 47, "usage_type": "attribute"}]}
+{"seq_id": "42763507585", "text": "# Least squares solving\n# y = m x + b\n# y0 = m x0 + b\n# y1 = m x1 + b\n# y2 = m x2 + b\n# y3 = m x3 + b\n# y4 = m x4 + b\n# equivalent to A p = y\n# with\n# A = [x0 1\n# x1 1\n# x2 1\n# x3 1\n# x4 1]\n# p = [m\n# b]\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom numpy.random import default_rng\n\nrng = default_rng(1)\n\nN = 100\nx = np.array(range(N))\nm = 0.5\nb = 1\n\nnoise_std = 0.1\ny = x * m + b\nnoise = rng.standard_normal(len(y)) * noise_std\ny_with_noise = y + noise\n\nA = np.array([x, np.ones(len(x))]).T\nsolution = np.linalg.lstsq(a=A, b=y_with_noise)\n# m_est = solution[0][0]\n# b_est = solution[0][1]\nm_est, b_est = solution[0]\n\ny_est = x * m_est + b_est\n\n\nplt.plot(x, y, 'k', label='Original data')\nplt.plot(x, y_with_noise, 'rx', label='Noisy data')\nplt.plot(x, y_est, 'g', label='Fitted line')\nplt.legend()\nplt.xlim([-1, N])\nplt.ylim([0, N/2 + b])\nplt.show()\n", "repo_name": "jhbrito/IVC2324", "sub_path": "Exercicio11.1Leastsquares.py", "file_name": "Exercicio11.1Leastsquares.py", "file_ext": "py", "file_size_in_byte": 889, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "73", "api": [{"api_name": "numpy.random.default_rng", "line_number": 22, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 25, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 34, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 34, "usage_type": "call"}, {"api_name": "numpy.linalg.lstsq", "line_number": 35, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 35, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 43, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 43, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 44, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 44, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 45, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 45, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 46, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 46, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 47, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 47, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 48, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 48, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 49, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 49, "usage_type": "name"}]}
+{"seq_id": "29821917588", "text": "def plot_loss_curves(history):\n \"\"\"\n Plots model performance of loss and accuracy,\n both training and validationd scores\n\n Args:\n history (keras.src.callbacks.History): tensorflow history object\n\n Returns:\n plots training/validation loss and accuracy metrics.\n \"\"\"\n import matplotlib.pyplot as plt\n\n loss = history.history['loss']\n val_loss = history.history['val_loss']\n\n accuracy = history.history['accuracy']\n val_accuracy = history.history['val_accuracy']\n\n epochs = range(len(history.history['loss']))\n\n # Utwórz siatkę 1x2 (jeden wiersz, dwie kolumny) dla wykresów\n fig, axs = plt.subplots(1, 2, figsize=(8, 4))\n\n # Wykres straty\n axs[0].plot(epochs, loss, label='training')\n axs[0].plot(epochs, val_loss, label='validate')\n axs[0].set_title('Loss')\n axs[0].set_xlabel('Epochs')\n axs[0].set_ylabel('Score')\n axs[0].legend()\n\n # Wykres dokładności\n axs[1].plot(epochs, accuracy, label='training')\n axs[1].plot(epochs, val_accuracy, label='validate')\n axs[1].set_title('Accuracy')\n axs[1].set_xlabel('Epochs')\n axs[1].set_ylabel('%')\n axs[1].legend()\n\n plt.tight_layout() # Zapewnia odpowiednie rozmieszczenie wykresów\n plt.show()\n", "repo_name": "neon-symeon/tensorflow_hacks", "sub_path": "tf_hacks_v_231007.py", "file_name": "tf_hacks_v_231007.py", "file_ext": "py", "file_size_in_byte": 1238, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "73", "api": [{"api_name": "matplotlib.pyplot.subplots", "line_number": 23, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 23, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 41, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 41, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 42, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 42, "usage_type": "name"}]}
+{"seq_id": "74966029354", "text": "import os\nfrom datetime import datetime\nfrom pathlib import Path\nfrom typing import *\nfrom icecream.icecream import ic\nfrom services.resource_connectors import *\nfrom services.config_manager_temp import Config_Manager\n\n\nclass Activity_Logger(object):\n _instance = None\n\n def __new__(cls, config_manager: Config_Manager, sshfs_connector: SSHFS_Connector, clear: bool = False):\n if cls._instance == None or clear:\n cls._instance = object.__new__(cls)\n return cls._instance\n\n def __init__(self, config_manager: Config_Manager, sshfs_connector: SSHFileSystem, clear: bool = False):\n self.config_manager = config_manager\n self.sshfs_connector = sshfs_connector\n self.setup_log_file()\n\n def log_activity(self, identifier):\n file_entries = self.sshfs_connector.read_file(self.log_file)\n file_entries.insert(0, f\"{identifier}\\n\")\n self.sshfs_connector.write_file(self.log_file, file_entries)\n\n def setup_log_file(self) -> None:\n self.log_file = f\"{self.config_manager.log_directory}/{datetime.now().date()}.log\"\n if not self.sshfs_connector.exists(self.log_file):\n print(f\"Log FILE not found. Log file created: {self.log_file}\")\n self.sshfs_connector.create_file(self.log_file)\n else:\n print(f\"Log File: {self.log_file}\")\n\n @property\n def recent_log_file(self) -> Any:\n ls_list = self.sshfs_connector.ls(\n self.config_manager.log_directory, detail=True)\n log_files = [entry for entry in ls_list if \"file\" in entry[\"type\"]\n if entry[\"name\"].endswith(\".log\")]\n return max(log_files, key=lambda dictio: dictio[\"mtime\"])[\"name\"]\n\n @property\n def latest_activity(self):\n with self.sshfs_connector.open(self.recent_log_file, \"r\") as stream:\n activity = stream.readline()\n if len(activity) == 0:\n return None\n return activity.strip(\"\\n\")\n", "repo_name": "normanclt/github_actions", "sub_path": "services/activity_logger.py", "file_name": "activity_logger.py", "file_ext": "py", "file_size_in_byte": 1966, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "73", "api": [{"api_name": "services.config_manager_temp.Config_Manager", "line_number": 13, "usage_type": "name"}, {"api_name": "services.config_manager_temp.Config_Manager", "line_number": 18, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 29, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 29, "usage_type": "name"}]}
+{"seq_id": "12970520116", "text": "from django.shortcuts import render\nimport requests\n\n# Create your views here.\n\ndef index(request):\n\n try:\n context = None\n if 'city' in request.POST:\n city = request.POST['city']\n else:\n city = 'dubai'\n \n appid = 'c6b66c97f9e2dc63cea06554d6e294b3'\n url = 'https://api.openweathermap.org/data/2.5/weather'\n params = {\n 'q': city,\n 'appid': appid,\n 'units': 'metric'\n }\n req = requests.get(url=url, params=params)\n res = req.json()\n \n try:\n name = res['name']\n country = res['sys']['country']\n temp = res['main']['temp']\n temp_min = res['main']['temp_min']\n temp_max = res['main']['temp_max']\n humid = res['main']['humidity']\n status = res['weather'][0]['main']\n icon = res['weather'][0]['icon']\n wind = res['wind']['speed']\n except:\n return render(request, 'core/error.html')\n \n\n context = {\n 'name': name,\n 'country': country,\n 'temp': temp,\n 'temp_min': temp_min,\n 'temp_max': temp_max,\n 'humid': humid,\n 'status': status,\n 'icon': icon,\n 'wind': wind,\n }\n\n return render(request, 'core/index.html', context)\n \n except:\n return render(request, 'core/error.html')", "repo_name": "nooto-code/weather-app", "sub_path": "core/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 1467, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "73", "api": [{"api_name": "requests.get", "line_number": 22, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 36, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 51, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 54, "usage_type": "call"}]}
+{"seq_id": "23182354599", "text": "# -*- coding: utf-8 -*-\r\n#!/usr/bin/python\r\n\r\nimport logging\r\nimport sys\r\n\r\nimport paramiko\r\nimport redis\r\n\r\nfrom ZapretInfoXMLParser import ZapretInfoXMLParser\r\nfrom QuaggaConfig import QuaggaConfig\r\n\r\n\r\nclass ZapretInfoDB(object):\r\n\r\n def __init__(self):\r\n self.host = '194.54.64.53'\r\n self.user = 'icmrsu'\r\n self.secret = 'gr@peb1ke'\r\n self.port = 22\r\n self.dump_file_path = '/gost-ssl/rzs/dump/dump.xml'\r\n self.r = redis.StrictRedis(host='localhost', port=6379, db=0)\r\n self.quagga = QuaggaConfig()\r\n\r\n # Зададим параметры логгирования\r\n self.logger = logging.getLogger(__name__)\r\n self.logger.setLevel(logging.INFO)\r\n formatter = logging.Formatter(u'%(filename)s[LINE:%(lineno)d]# %(levelname)-8s [%(asctime)s] %(message)s')\r\n console_handler = logging.StreamHandler(sys.stdout)\r\n console_handler.setLevel(logging.INFO)\r\n console_handler.setFormatter(formatter)\r\n file_handler = logging.FileHandler('zapret-info-db.log')\r\n file_handler.setLevel(logging.INFO)\r\n file_handler.setFormatter(formatter)\r\n self.logger.addHandler(console_handler)\r\n self.logger.addHandler(file_handler)\r\n\r\n def update_domains(self):\r\n # Получаем обновленный дамп реестра запрещенных сайтов с удаленного сервера по SFTP\r\n # и загружаем его в базу данных Redis\r\n client = paramiko.SSHClient()\r\n client.set_missing_host_key_policy(paramiko.AutoAddPolicy())\r\n client.connect(hostname=self.host, username=self.user, password=self.secret, port=self.port)\r\n sftp = client.open_sftp()\r\n\r\n self.logger.info('Start updating Redis ZapretInfo database from new xml dump...')\r\n with sftp.open(self.dump_file_path, 'r') as f:\r\n zapret_info_xml = ZapretInfoXMLParser(f)\r\n zapret_info_xml.get_domains()\r\n for domain in zapret_info_xml.domains:\r\n self.r.sadd('new_domains', domain)\r\n self.logger.info('New ZapretInfo dump contain %s domains' % len(zapret_info_xml.domains))\r\n\r\n # Получаем список доменов, которые были исключены из Реестра запрещенных сайтов\r\n self.r.sdiffstore('domains_for_delete', 'domains', 'new_domains')\r\n self.logger.info('After update required for delete %s domains' % self.r.scard('domains_for_delete'))\r\n # Подмениваем текущий список доменов обновленным\r\n self.r.rename('new_domains', 'domains')\r\n # Удаляем устаревшие домены и их IP адреса из базы данных Redis и BGP конфигурации Quagga\r\n self.delete_domains()\r\n\r\n def delete_domains(self):\r\n # Удаляем устаревшие домены и их IP адреса из базы данных Redis и BGP конфигурации Quagga\r\n ip_for_delete = set()\r\n # Удаляем все множества с IP адресами для доменов, которые были исключены из Реестра запрещенных сайтов\r\n #for d in range(self.r.scard('domains_for_delete')):\r\n # domain = self.r.spop('domains_for_delete')\r\n for domain in self.r.smembers('domains_for_delete'):\r\n # Сначала определим и сохраним множество IP адресов всех доменов которые необходимо удалить из Quagga\r\n ip_for_delete = ip_for_delete.union(self.r.smembers(domain))\r\n # Затем удаляем само множесвво из базы данных Redis\r\n self.logger.info('deleted domain ' + domain.encode('utf-8') + ' from Redis database')\r\n self.logger.info('domain %s had following addresses: %s' % (domain.encode('utf-8'), self.r.smembers(domain)))\r\n self.r.delete(domain)\r\n # Удаляем из Quagga определенные ранее IP адреса, домены которых были исключены из реестра запрещенных сайтов\r\n if ip_for_delete:\r\n self.quagga.delete_bgp_networks(ip_for_delete)\r\n\r\n def 
add_domains(self):\r\n # Добавляем в конфигурацию Quagga накопившиеся в базе Redis новые IP адреса\r\n # для доменов из реестра запрещенных сайтов\r\n current_ip_set = set()\r\n for domain in self.r.smembers('domains'):\r\n current_ip_set = current_ip_set.union(self.r.smembers(domain))\r\n self.logger.info('Current Redis database contain %s IP addresses' % len(current_ip_set))\r\n\r\n # Из множества IP адресов которые занесены в базу данных Redis вычитаем\r\n # множество IP адресов из конфигурации Quagga.\r\n # В итоге получаем набор адресов, которе необходимо занести в Quagga\r\n ip_for_add = current_ip_set.difference(self.quagga.read_current_networks())\r\n if ip_for_add:\r\n self.quagga.add_bgp_networks(ip_for_add)\r\n else:\r\n self.logger.info('Nothing to add in quagga')\r\n\r\nif __name__ == \"__main__\":\r\n z = ZapretInfoDB()\r\n z.update_domains()\r\n z.add_domains()", "repo_name": "krtvand/dns-sniffer", "sub_path": "ZapretInfoDB.py", "file_name": "ZapretInfoDB.py", "file_ext": "py", "file_size_in_byte": 5599, "program_lang": "python", "lang": "ru", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "73", "api": [{"api_name": "redis.StrictRedis", "line_number": 22, "usage_type": "call"}, {"api_name": "QuaggaConfig.QuaggaConfig", "line_number": 23, "usage_type": "call"}, {"api_name": "logging.getLogger", "line_number": 26, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 27, "usage_type": "attribute"}, {"api_name": "logging.Formatter", "line_number": 28, "usage_type": "call"}, {"api_name": "logging.StreamHandler", "line_number": 29, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 29, "usage_type": "attribute"}, {"api_name": "logging.INFO", "line_number": 30, "usage_type": "attribute"}, {"api_name": "logging.FileHandler", "line_number": 32, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 33, "usage_type": "attribute"}, {"api_name": "paramiko.SSHClient", "line_number": 41, "usage_type": "call"}, {"api_name": "paramiko.AutoAddPolicy", "line_number": 42, "usage_type": "call"}, {"api_name": "ZapretInfoXMLParser.ZapretInfoXMLParser", "line_number": 48, "usage_type": "call"}]}
+{"seq_id": "7269589737", "text": "# -*- coding: utf-8 -*-\nfrom django.conf.urls import patterns, include, url\n\n\nurlpatterns = patterns('src.shop.views',\n\n url(r'^$', 'index', name='index'),\n url(r'^category/(?P[^/]+)/$', 'category', name='category'),\n url(r'^product/(?P[^/]+)/$', 'product_detail', name='product_detail'),\n\n\n)\n", "repo_name": "D1onisko/market", "sub_path": "src/shop/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 310, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "73", "api": [{"api_name": "django.conf.urls.patterns", "line_number": 5, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 7, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 8, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 9, "usage_type": "call"}]}
+{"seq_id": "17972085608", "text": "\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport logging\n\nimport torch\nimport torch.nn\nimport torchvision\n\nfrom torch.nn import Module, Conv3d, BatchNorm3d, MaxPool3d, ReLU\nfrom torch.nn import functional as F\nfrom scipy import signal\n\nfrom AQA_head.nets.utils import ChannelShuffleLayer, Channelwise_1D, TemporalConv1DLayer, MaxPoolChannel, AvgPoolChannel\n\n\n\nclass AQA(Module):\n \"\"\"\n AQA implementation in pytorch\n \"\"\"\n\n def __init__(self, input_shape_JCA, input_shape_ADA, n_layers_JCA=4,\n n_layers_ADA=3,n_branches_JCA=3, n_branches_ADA=3,\n expansion_factor_JCA = 1.25, expansion_factor_ADA = 1.25, final_expansion_JCA_for_balance = 10,\n figure_skating_flag = 0, Ablated_flag = 'N',\n Spatial_Attention_Method = 'N', Temporal_Attention_Method = 'N',\n Coeff_Spatial_Attention = 1, Coeff_Temporal_Attention=1):\n\n super(AQA, self).__init__()\n is_dilated = False\n\n self.expansion_factor_JCA = expansion_factor_JCA\n self.expansion_factor_ADA = expansion_factor_ADA\n self.n_layers_JCA = n_layers_JCA\n self.n_layers_ADA = n_layers_ADA\n self.is_dilated = is_dilated\n self.n_branches_JCA = n_branches_JCA\n self.n_branches_ADA = n_branches_ADA\n self.final_expansion_JCA_for_balance = final_expansion_JCA_for_balance\n self.figure_skating_flag = figure_skating_flag\n self.n_channels_out = None\n self.Ablated_flag = Ablated_flag\n self.Spatial_Attention_Method = Spatial_Attention_Method\n self.Temporal_Attention_Method = Temporal_Attention_Method\n self.Coeff_Spatial_Attention = Coeff_Spatial_Attention\n self.Coeff_Temporal_Attention = Coeff_Temporal_Attention\n\n\n input_shape_JCA = list(input_shape_JCA)\n input_shape_ADA = list(input_shape_ADA)\n n_channels_in_ADA = input_shape_ADA[1]\n n_channels_in_JCA = input_shape_JCA[1]\n\n n_channels_out_JCA = self.__define_JCA_layers_overall(input_shape_JCA, n_layers_JCA, n_branches_JCA,\n expansion_factor_JCA, is_dilated)\n n_channels_out_ADA = self.__define_ADA_layers_overall(input_shape_ADA, n_layers_ADA, n_branches_ADA,\n expansion_factor_ADA, is_dilated)\n\n self.n_channels_out_JCA = n_channels_out_JCA\n self.n_channels_out_ADA = n_channels_out_ADA\n\n if self.Ablated_flag == 'ADA':\n self.n_channels_out = self.n_channels_out_ADA\n elif self.Ablated_flag == 'JCA':\n self.n_channels_out = self.n_channels_out_JCA * final_expansion_JCA_for_balance\n elif self.Ablated_flag == 'Appearance':\n self.n_channels_out = n_channels_in_ADA\n elif self.Ablated_flag == 'Pose':\n self.n_channels_out = n_channels_in_JCA\n else:\n self.n_channels_out = (self.n_channels_out_ADA + self.n_channels_out_JCA * final_expansion_JCA_for_balance)\n\n def forward(self, input):\n\n input_ADA = input[0]\n input_JCA = input[1]\n input_ADA = self.__attention_module(input_ADA)\n input_JCA = self.__attention_module(input_JCA)\n\n expansion_factor_JCA = self.expansion_factor_JCA\n expansion_factor_ADA = self.expansion_factor_ADA\n n_layers_JCA = self.n_layers_JCA\n n_layers_ADA = self.n_layers_ADA\n n_branches_JCA = self.n_branches_JCA\n n_branches_ADA = self.n_branches_ADA\n\n out = []\n\n if self.Ablated_flag == 'ADA':\n output_ADA = self.__call_ADA_layers(input_ADA, n_layers_ADA, n_branches_ADA, expansion_factor_ADA)\n out.append(output_ADA)\n output = output_ADA\n elif self.Ablated_flag == 'JCA':\n output_JCA = self.__call_JCA_layers(input_JCA, n_layers_JCA, n_branches_JCA, expansion_factor_JCA)\n 
out.append(output_JCA)\n            output = output_JCA\n        elif self.Ablated_flag == 'Appearance':\n            output = input[0]\n        elif self.Ablated_flag == 'Pose':\n            output = input[1]\n        else:\n            output_ADA = self.__call_ADA_layers(input_ADA, n_layers_ADA, n_branches_ADA, expansion_factor_ADA)\n            out.append(output_ADA)\n            output_JCA = self.__call_JCA_layers(input_JCA, n_layers_JCA, n_branches_JCA, expansion_factor_JCA)\n            out.append(output_JCA)\n            output = torch.cat(out, dim=1)\n\n        return output\n\n    def __define_JCA_layers_overall(self, input_shape_JCA, n_layers_JCA, n_branches_JCA, expansion_factor_JCA, is_dilated):\n        \"\"\"\n        Define the overall JCA layers (not the details).\n        \"\"\"\n\n        # how many layers of timeception\n        for i in range(n_layers_JCA):\n\n            n_channels_in_JCA = input_shape_JCA[1]\n            layer_num = i + 1\n\n            # get details about grouping\n            getchannels = self.__get_n_channels_for_JCA(expansion_factor_JCA, n_branches_JCA, n_channels_in_JCA)\n            n_channels_per_branch_in_JCA, n_channels_base_out_JCA, n_channels_out_JCA = getchannels\n\n\n            # temporal conv per group\n            self.__define_JCA(input_shape_JCA, n_branches_JCA, is_dilated, layer_num)\n\n            # activation\n            layer_name = 'relu_JCA%d' % (layer_num)\n            layer = ReLU()\n            layer._name = layer_name\n            setattr(self, layer_name, layer)\n\n            # SpatialConvForHeatmaps\n            layer_name = 'conv_spatial_JCA%d' % (layer_num)\n            layer = Conv3d(n_channels_out_JCA , n_channels_out_JCA , kernel_size=(1, 3, 3),padding=(0,layer_num!=n_layers_JCA,layer_num!=n_layers_JCA))\n            layer._name = layer_name\n            setattr(self, layer_name, layer)\n\n            layer_name = 'maxpool_spatial_JCA%d' % (layer_num)\n            layer = MaxPool3d(kernel_size=(1, (self.figure_skating_flag != 1) + 1, (self.figure_skating_flag != 1) + 1))\n            layer._name = layer_name\n            setattr(self, layer_name, layer)\n\n            n_channels_in_JCA = n_channels_out_JCA\n            input_shape_JCA[1] = n_channels_in_JCA\n            input_shape_JCA[2] = int(input_shape_JCA[2]/float(2))\n\n        layer_name = 'temp&spatialMaxPool_JCA_last_adapt'\n        layer = MaxPool3d(kernel_size=(8, (self.n_layers_JCA != 3 or self.figure_skating_flag==1) + 1, (self.n_layers_JCA != 3 or self.figure_skating_flag==1) + 1))\n        layer._name = layer_name\n        setattr(self, layer_name, layer)\n\n        layer_name = 'channel_expansion_JCA_last_adapt'\n        layer = Conv3d(input_shape_JCA[1], input_shape_JCA[1] * self.final_expansion_JCA_for_balance , kernel_size=(1, 1, 1))\n        layer._name = layer_name\n        setattr(self, layer_name, layer)\n\n        return n_channels_in_JCA\n\n    def __define_JCA(self, input_shape_JCA,n_branches_JCA, is_dilated, layer_num):\n        \"\"\"\n        Define the detailed convolutions inside each JCA block.\n        \"\"\"\n        getchannels_JCA = self.__get_n_channels_for_JCA(self.expansion_factor_JCA, n_branches_JCA, input_shape_JCA[1])\n        n_channels_per_branch_in_JCA, n_channels_base_branch_out_JCA, n_channels_out_JCA = getchannels_JCA\n\n        #assert n_channels_in_JCA % n_channels_per_branch_in_JCA == 0\n\n        # type of multi-scale kernels to use: either multi_kernel_sizes or multi_dilation_rates\n        if is_dilated:\n            kernel_sizes_temporal = (3, 3, 3)\n            dilation_rates_temporal = (1, 2, 3)\n        else:\n            kernel_sizes_temporal = (3, 5, 7)\n            dilation_rates_temporal = (1, 1, 1)\n\n        kernel_size_channel = 3\n        dilation_rate_channel = 1\n\n        for branch_num in range (n_branches_JCA):\n\n            base_AvgPool_size = int(n_channels_per_branch_in_JCA/float(n_channels_base_branch_out_JCA))\n            n_channels_current_branch_out_JCA = int(n_channels_per_branch_in_JCA/(base_AvgPool_size + branch_num))\n\n            ## Temporal\n            layer_name = 'temporal_b%d_JCA%d' % (branch_num, layer_num)\n            layer = 
TemporalConv1DLayer(input_shape_JCA, kernel_sizes_temporal[branch_num],\n dilation_rates_temporal[branch_num], layer_name)\n setattr(self, layer_name, layer)\n\n ## Temporal MaxPool\n layer_name = 'tmpmaxpool_b%d_JCA%d' % (branch_num, layer_num)\n layer = MaxPool3d(kernel_size=(2,1,1))\n layer._name = layer_name\n setattr(self, layer_name, layer)\n\n ## Channelwise\n layer_name = 'convch_b%d_JCA%d' % (branch_num, layer_num)\n layer = Channelwise_1D(input_shape_JCA, kernel_size_channel,\n dilation_rate_channel, layer_name)\n setattr(self, layer_name, layer)\n\n\n ## Channel-wise AvgPool\n # ChannelPoolsize = int(n_channels_per_branch_in_JCA/n_channels_per_branch_out_JCA)\n\n layer_name = 'chavgpool_b%d_JCA%d' % (branch_num, layer_num)\n layer = AvgPoolChannel(base_AvgPool_size + branch_num, layer_name)\n layer._name = layer_name\n setattr(self, layer_name, layer)\n\n ## BatchNorm\n layer_name = 'bn1_b%d_JCA%d' % (branch_num, layer_num)\n layer = BatchNorm3d(n_channels_current_branch_out_JCA)\n layer._name = layer_name\n setattr(self, layer_name, layer)\n\n\n ## BatchNorm\n # layer_name = 'bn2_b%d_JCA%d' % (branch_num, layer_num)\n # layer = BatchNorm3d(n_channels_current_branch_out_JCA)\n # layer._name = layer_name\n # setattr(self, layer_name, layer)\n\n\n def __define_ADA_layers_overall(self, input_shape_ADA, n_layers_ADA, n_branches_ADA, expansion_factor_ADA, is_dilated):\n \"\"\"\n Define layers inside the timeception layers.\n \"\"\"\n\n # how many layers of timeception\n for i in range(n_layers_ADA):\n\n n_channels_in_ADA = input_shape_ADA[1]\n\n layer_num = i + 1\n\n # get details about grouping\n getchannels_ADA = self.__get_n_channels_for_ADA(expansion_factor_ADA, n_branches_ADA, n_channels_in_ADA=\n n_channels_in_ADA)\n n_channels_out_sep_per_branch_in_out, n_channels_out_ADA = getchannels_ADA\n\n # channel reduction\n layer_name = 'chreduce_ADA%d' % (layer_num)\n layer = Conv3d(n_channels_in_ADA , n_channels_out_sep_per_branch_in_out , kernel_size=(1, 1, 1))\n layer._name = layer_name\n setattr(self, layer_name, layer)\n\n # temporal conv per group\n self.__define_ADA(input_shape_ADA, n_branches_ADA, is_dilated, layer_num)\n\n # activation\n layer_name = 'relu_ADA%d' % (layer_num)\n layer = ReLU()\n layer._name = layer_name\n setattr(self, layer_name, layer)\n\n # SpatialConv\n if self.Spatial_Attention_Method != 'N':\n\n layer_name = 'conv_spatial_ADA%d' % (layer_num)\n layer = Conv3d(n_channels_out_ADA , n_channels_out_ADA , kernel_size=(1, 3, 3), padding= (0,1,1))\n layer._name = layer_name\n setattr(self, layer_name, layer)\n\n\n n_channels_in_ADA = n_channels_out_ADA\n input_shape_ADA[1] = n_channels_in_ADA\n\n\n return n_channels_in_ADA\n\n\n def __define_ADA(self, input_shape_ADA, n_branches_ADA, is_dilated, layer_num):\n \"\"\"\n Define layers inside grouped convolutional block.\n \"\"\"\n\n getchannels_ADA = self.__get_n_channels_for_ADA(self.expansion_factor_ADA, n_branches_ADA, input_shape_ADA[1])\n n_channels_sep_per_branch_in_out_ADA, n_channels_out_ADA = getchannels_ADA\n\n #assert n_channels_in_ADA % n_channels_per_branch_in_ADA == 0\n\n # type of multi-scale kernels to use: either multi_kernel_sizes or multi_dilation_rates\n if is_dilated:\n kernel_sizes_temporal = (3, 3, 3)\n dilation_rates_temporal = (1, 2, 3)\n else:\n kernel_sizes_temporal = (3, 5, 7)\n dilation_rates_temporal = (1, 1, 1)\n\n\n for branch_num in range (n_branches_ADA):\n\n ## Temporal\n temp_inp_branch_ADA = input_shape_ADA\n temp_inp_branch_ADA[1] = n_channels_sep_per_branch_in_out_ADA\n 
layer_name = 'temporal_b%d_ADA%d' % (branch_num, layer_num)\n layer = TemporalConv1DLayer(temp_inp_branch_ADA, kernel_sizes_temporal[branch_num],\n dilation_rates_temporal[branch_num], layer_name)\n\n setattr(self, layer_name, layer)\n\n ## BatchNorm\n layer_name = 'bn1_b%d_ADA%d' % (branch_num, layer_num)\n layer = BatchNorm3d(n_channels_sep_per_branch_in_out_ADA)\n layer._name = layer_name\n setattr(self, layer_name, layer)\n\n ## Temporal MaxPool\n layer_name = 'tmpmaxpool_b%d_ADA%d' % (branch_num, layer_num)\n layer = MaxPool3d(kernel_size=(2,1,1))\n layer._name = layer_name\n setattr(self, layer_name, layer)\n\n ## BatchNorm\n layer_name = 'bn2_b%d_ADA%d' % (branch_num, layer_num)\n layer = BatchNorm3d(n_channels_sep_per_branch_in_out_ADA)\n layer._name = layer_name\n setattr(self, layer_name, layer)\n\n def __call_JCA_layers(self, tensor, n_layers_JCA, n_branches_JCA, expansion_factor_JCA):\n\n\n input_shape = tensor.size()\n n_channels_in_JCA = input_shape[1]\n\n # how many layers of timeception\n\n for i in range(n_layers_JCA):\n layer_num = i + 1\n\n # get details about grouping\n getchannels_JCA = self.__get_n_channels_for_JCA(expansion_factor_JCA, n_branches_JCA, n_channels_in_JCA)\n n_channels_per_branch_in, n_channels_base_branch_out, n_channels_out = getchannels_JCA\n\n # temporal conv per group\n tensor = self.__call_JCA(tensor, layer_num, n_branches_JCA)\n\n n_channels_in_JCA = n_channels_out\n\n tensor = getattr(self, 'temp&spatialMaxPool_JCA_last_adapt')(tensor)\n tensor = getattr(self, 'channel_expansion_JCA_last_adapt')(tensor)\n\n\n return tensor\n\n\n def __call_JCA(self, tensor, layer_num, n_branches):\n\n t = []\n\n for branches in range (n_branches):\n\n t_1 = getattr(self, 'temporal_b%d_JCA%d' % (branches, layer_num))(tensor)\n t_2 = getattr(self, 'convch_b%d_JCA%d' % (branches, layer_num))(t_1)\n t_3 = getattr(self, 'chavgpool_b%d_JCA%d' % (branches, layer_num))(t_2)\n t_4 = getattr(self, 'tmpmaxpool_b%d_JCA%d' % (branches, layer_num))(t_3)\n t_5 = getattr(self, 'bn1_b%d_JCA%d' % (branches, layer_num))(t_4)\n t.append(t_5)\n\n tensor = torch.cat(t, dim=1)\n tensor = getattr(self, 'relu_JCA%d' % (layer_num))(tensor)\n\n tensor = getattr(self, 'conv_spatial_JCA%d' % (layer_num))(tensor)\n tensor = getattr(self, 'maxpool_spatial_JCA%d' % (layer_num))(tensor)\n\n\n\n # concatenate channels of branches\n\n return tensor\n\n def __call_ADA_layers(self, tensor, n_layers_ADA, n_branches_ADA, expansion_factor_ADA):\n\n\n input_shape = tensor.size()\n n_channels_in_ADA = input_shape[1]\n\n # how many layers of timeception\n\n for i in range(n_layers_ADA):\n layer_num = i + 1\n\n # get details about grouping\n getchannels_ADA = self.__get_n_channels_for_ADA(expansion_factor_ADA, n_branches_ADA, n_channels_in_ADA )\n n_channels_sep_per_branch_in_out, n_channels_out = getchannels_ADA\n\n # temporal conv per group\n tensor = self.__call_ADA(tensor, layer_num, n_branches_ADA)\n\n n_channels_in_ADA = n_channels_out\n\n\n return tensor\n\n\n def __call_ADA(self, tensor, layer_num, n_branches):\n\n t_1 = getattr(self, 'chreduce_ADA%d' % (layer_num))(tensor)\n t = []\n\n for branches in range (n_branches):\n\n t_2 = getattr(self, 'temporal_b%d_ADA%d' % (branches, layer_num))(t_1)\n # t_3 = getattr(self, 'bn1_b%d_ADA%d' % (branches, layer_num))(t_2)\n t_3 = getattr(self, 'tmpmaxpool_b%d_ADA%d' % (branches, layer_num))(t_2)\n t_5 = getattr(self, 'bn2_b%d_ADA%d' % (branches, layer_num))(t_3)\n t.append(t_5)\n\n t = torch.cat(t, dim=1)\n tensor = getattr(self, 'relu_ADA%d' % 
(layer_num))(t)\n\n        if self.Spatial_Attention_Method != 'N':\n            tensor = getattr(self, 'conv_spatial_ADA%d' % (layer_num))(tensor)\n\n        # concatenate channels of branches\n\n        return tensor\n\n    def __get_n_channels_for_JCA(self, expansion_factor_JCA, n_branches_JCA, n_channels_in_JCA):\n\n        if n_branches_JCA == 3:\n            if expansion_factor_JCA == 13/12:\n                channelAvgPool_size = 2\n            elif expansion_factor_JCA == 47/60:\n                channelAvgPool_size = 3\n            else:\n                raise ValueError('Current setting of expansion factor is not practical. Please try again')\n        elif n_branches_JCA == 2:\n            if expansion_factor_JCA == 5 / 6:\n                channelAvgPool_size = 2\n            elif expansion_factor_JCA == 7/12:\n                channelAvgPool_size = 3\n            else:\n                raise ValueError('Current setting of expansion factor is not practical. Please try again')\n        else:\n            raise ValueError('Deploying more than 3 branches or a single branch is not valid. Please try again')\n\n        n_channels_per_branch_in = int(n_channels_in_JCA)\n        # n_channels_base_branch_out = int(n_channels_in_JCA * expansion_factor_JCA / float(n_branches_JCA))\n        n_channels_base_branch_out = int (n_channels_in_JCA/float(channelAvgPool_size))\n\n        n_channels_out = 0\n        for branch in range(n_branches_JCA):\n            n_channels_out = n_channels_out + int(n_channels_in_JCA/float(channelAvgPool_size + branch))\n\n        return n_channels_per_branch_in, n_channels_base_branch_out, n_channels_out\n\n\n    def __get_n_channels_for_ADA(self, expansion_factor_ADA, n_branches_ADA, n_channels_in_ADA):\n\n        n_channels_out_sep_per_branch_in_out = int(n_channels_in_ADA * expansion_factor_ADA/ float(n_branches_ADA))\n        n_channels_out = int(n_channels_out_sep_per_branch_in_out * n_branches_ADA)\n\n        return n_channels_out_sep_per_branch_in_out, n_channels_out\n\n\n    def __attention_module(self, tensor):\n\n        Num_timesteps = tensor.size()[2]\n        Spatial_size = tensor.size()[3]\n        a = self.Coeff_Temporal_Attention\n        \n        if self.Temporal_Attention_Method == 'Linear':\n            timeArray = torch.arange(Num_timesteps).to(device='cuda', dtype=torch.float)\n            timeArray = a + (1 - a) * timeArray / Num_timesteps\n            new_tensor = tensor * timeArray.view(1, 1, Num_timesteps, 1, 1)\n        else:\n            new_tensor = tensor\n\n        if self.Spatial_Attention_Method == 'Gaussian':\n            spatialArray = torch.from_numpy(signal.gaussian(Spatial_size, std = self.Coeff_Spatial_Attention)).to(device='cuda', dtype=torch.float)\n            new_tensor = new_tensor * spatialArray.view(1, 1, 1, Spatial_size, 1)\n            new_tensor = new_tensor * spatialArray.view(1, 1, 1, 1, Spatial_size)\n\n        return new_tensor\n\n", "repo_name": "MahdiNek/EAGLE-Eye", "sub_path": "AQA_head/nets/AQA.py", "file_name": "AQA.py", "file_ext": "py", "file_size_in_byte": 19484, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "73", "api": [{"api_name": "torch.nn.Module", "line_number": 21, "usage_type": "name"}, {"api_name": "torch.cat", "line_number": 110, "usage_type": "call"}, {"api_name": "torch.nn.ReLU", "line_number": 135, "usage_type": "call"}, {"api_name": "torch.nn.Conv3d", "line_number": 141, "usage_type": "call"}, {"api_name": "torch.nn.MaxPool3d", "line_number": 146, "usage_type": "call"}, {"api_name": "torch.nn.MaxPool3d", "line_number": 155, "usage_type": "call"}, {"api_name": "torch.nn.Conv3d", "line_number": 160, "usage_type": "call"}, {"api_name": "AQA_head.nets.utils.TemporalConv1DLayer", "line_number": 193, "usage_type": "call"}, {"api_name": "torch.nn.MaxPool3d", "line_number": 199, "usage_type": "call"}, {"api_name": "AQA_head.nets.utils.Channelwise_1D", "line_number": 
205, "usage_type": "call"}, {"api_name": "AQA_head.nets.utils.AvgPoolChannel", "line_number": 214, "usage_type": "call"}, {"api_name": "torch.nn.BatchNorm3d", "line_number": 220, "usage_type": "call"}, {"api_name": "torch.nn.Conv3d", "line_number": 251, "usage_type": "call"}, {"api_name": "torch.nn.ReLU", "line_number": 260, "usage_type": "call"}, {"api_name": "torch.nn.Conv3d", "line_number": 268, "usage_type": "call"}, {"api_name": "AQA_head.nets.utils.TemporalConv1DLayer", "line_number": 305, "usage_type": "call"}, {"api_name": "torch.nn.BatchNorm3d", "line_number": 312, "usage_type": "call"}, {"api_name": "torch.nn.MaxPool3d", "line_number": 318, "usage_type": "call"}, {"api_name": "torch.nn.BatchNorm3d", "line_number": 324, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 368, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 417, "usage_type": "call"}, {"api_name": "torch.arange", "line_number": 472, "usage_type": "call"}, {"api_name": "torch.float", "line_number": 472, "usage_type": "attribute"}, {"api_name": "torch.from_numpy", "line_number": 479, "usage_type": "call"}, {"api_name": "scipy.signal.gaussian", "line_number": 479, "usage_type": "call"}, {"api_name": "scipy.signal", "line_number": 479, "usage_type": "name"}, {"api_name": "torch.float", "line_number": 479, "usage_type": "attribute"}]}
+{"seq_id": "20647540700", "text": "from beaker.middleware import SessionMiddleware\nfrom app import vn, auth\nimport bottle\nimport os\n\n#Beaker session middleware\nsession_opts = {\n 'session.type': 'file',\n 'session.cookie_expires': True,\n 'session.auto' : True,\n 'session.data_dir': 'cache'\n}\napp = bottle.default_app()\napp = SessionMiddleware(app, session_opts)\nport = int(os.environ.get(\"PORT\", 5000))\n\nimport socket\nhostname = socket.gethostname()\n#if on heroku host name will not contain rizvan\nif 'Rizvan' in hostname:\n bottle.run(host='0.0.0.0', port=port, app=app)\n\n\n", "repo_name": "rizvn/videonotes", "sub_path": "start.py", "file_name": "start.py", "file_ext": "py", "file_size_in_byte": 551, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "73", "api": [{"api_name": "bottle.default_app", "line_number": 13, "usage_type": "call"}, {"api_name": "beaker.middleware.SessionMiddleware", "line_number": 14, "usage_type": "call"}, {"api_name": "os.environ.get", "line_number": 15, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 15, "usage_type": "attribute"}, {"api_name": "socket.gethostname", "line_number": 18, "usage_type": "call"}, {"api_name": "bottle.run", "line_number": 21, "usage_type": "call"}]}
+{"seq_id": "43681437801", "text": "from selenium import webdriver\nfrom selenium.webdriver.common.by import By\nimport time\nimport random\nimport string\n\ntry:\n browser = webdriver.Chrome()\n browser.get(\"http://suninjuly.github.io/huge_form.html\")\n elements = browser.find_elements(By.CSS_SELECTOR, \"input[type='text']\")\n\n for element in elements:\n random_word = ''.join(random.choice(string.ascii_lowercase) for _ in range(8))\n element.send_keys(random_word)\n\n checkbox = browser.find_element(By.CSS_SELECTOR, \"input[type='checkbox']\")\n checkbox.click()\n\n radio = browser.find_element(By.CSS_SELECTOR, \"input[value='robots']\")\n radio.click()\n\n button = browser.find_element(By.CSS_SELECTOR, \"button.btn\")\n button.click()\n\nfinally:\n time.sleep(3)\n browser.quit()", "repo_name": "KopchukVolod/SeleniumWebDriver-on-Python", "sub_path": "The code navigates to a webpage that contains a huge form with numerous input fields of various types.py", "file_name": "The code navigates to a webpage that contains a huge form with numerous input fields of various types.py", "file_ext": "py", "file_size_in_byte": 773, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "73", "api": [{"api_name": "selenium.webdriver.Chrome", "line_number": 8, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 8, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.CSS_SELECTOR", "line_number": 10, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 10, "usage_type": "name"}, {"api_name": "random.choice", "line_number": 13, "usage_type": "call"}, {"api_name": "string.ascii_lowercase", "line_number": 13, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By.CSS_SELECTOR", "line_number": 16, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 16, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.CSS_SELECTOR", "line_number": 19, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 19, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.CSS_SELECTOR", "line_number": 22, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 22, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 26, "usage_type": "call"}]}
+{"seq_id": "41833093154", "text": "### BY BOAZ MENARD AND BRANDON ELMORE ###\r\nDISTANCE_THRESHOLD = 3 # distance in inches within which an object is detected\r\nSTARTING_SPEED = 50\r\n\r\nfrom picamera import PiCamera\r\nimport traceback\r\nimport picar\r\nfrom picar.obstacle_sensor import *\r\nfrom picar.front_wheels import *\r\nfrom picar.back_wheels import *\r\nfrom picar.line_sensor import *\r\nimport time\r\nimport Pyro4\r\nimport pickle\r\nfrom PIL import Image\r\n\r\nsteering = Front_Wheels() # create a Front_Wheels object for steering the car\r\nmotors = Back_Wheels() # create a Back_Wheels object to move the car\r\nobjSensor = Obstacle_Sensor() # create an Object_Sensor() object to detect distance to objects\r\nlineSensor = Line_Sensor() # create a Line_Sensor() object to detect lines on the floor\r\ncamera = PiCamera()\r\npicar.setup()\r\nsteering.ready()\r\nmotors.speed = STARTING_SPEED\r\nmotors.ready()\r\nhardRight = 135\r\nslightRight = 100\r\nstr8 = 90\r\nslightLeft = 80\r\nhardLeft = 45\r\n\r\nstarfleetcomm = Pyro4.Proxy('PYRONAME:starfleetcomm')\r\n\r\n\r\n\r\ndef followLine():\r\n '''\r\n Read line sensor and return a two tuple with the first element the speed\r\n and the second element the turn angle ie (speed, angle).\r\n '''\r\n readings = lineSensor.read_digital()\r\n print('Line Sensor: ', readings)\r\n if readings == [0, 0, 0, 0, 0]:\r\n steering.turn(hardRight)\r\n elif readings == [1, 1, 1, 1, 1]:\r\n motors.stop()\r\n steering.turn_straight()\r\n motors.backward()\r\n time.sleep(.5)\r\n motors.stop()\r\n steering.turn_left()\r\n motors.forward()\r\n time.sleep(1)\r\n steering.turn(str8)\r\n elif readings == [0, 0, 1, 1, 1] or readings == [0, 0, 0, 1, 1] or readings == [0, 0, 0, 0, 1]:\r\n steering.turn(str8)\r\n elif readings == [0, 1, 1, 1, 0] or readings == [0, 0, 1, 0, 0] or readings == [0, 0, 1, 1, 0] or readings == [0, 1, 1, 0, 0]:\r\n steering.turn(slightLeft)\r\n elif readings == [1, 0, 0, 0, 0] or readings == [1, 1, 0, 0, 0] or readings == [1, 1, 1, 0, 0]:\r\n steering.turn(hardLeft)\r\n\r\n\r\n \r\ndef Obstacle():\r\n \"\"\"Takes a picture when an object is detected then waits for an input from user\"\"\"\r\n if objSensor.distance() <= DISTANCE_THRESHOLD:\r\n motors.stop()\r\n camera.start_preview()\r\n time.sleep(.5)\r\n camera.capture('/home/pi/Desktop/TermProject/object.jpg')\r\n camera.stop_preview()\r\n img = Image.open('/home/pi/Desktop/TermProject/object.jpg')\r\n imge = pickle.dumps(img)\r\n starfleetcomm.sendImage(imge)\r\n while not starfleetcomm.isNewCommandPosted():\r\n time.sleep(2)\r\n Decision = starfleetcomm.getCommand()\r\n if Decision == 'M':\r\n steering.turn(45)\r\n steering.turn(135)\r\n steering.ready()\r\n time.sleep(3)\r\n motors.forward()\r\n elif Decision == 'I':\r\n motors.forward()\r\n steering.turn(45)\r\n time.sleep(2)\r\n traverseTheMaze()\r\n elif Decision == \"S\":\r\n motors.forward()\r\n time.sleep(1)\r\n \r\n \r\ndef traverseTheMaze():\r\n '''\r\n Continuosly steers the PiCar-S until no line is detected, where by\r\n the function exits.\r\n '''\r\n defaultSpeed = s = 20\r\n motors.speed = 20\r\n motors.forward()\r\n while s != 0:\r\n print(objSensor.distance())\r\n Obstacle()\r\n followLine()\r\n \r\n motors.speed = s\r\n motors.stop()\r\n motors.speed = defaultSpeed\r\n#def steering():\r\n #steering.turn_left()\r\n", "repo_name": "boazmenard/Design-Thinking-II", "sub_path": "A Maze'n Robot - Klingons/TraverseTheMaze.py", "file_name": "TraverseTheMaze.py", "file_ext": "py", "file_size_in_byte": 3529, "program_lang": 
"python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "73", "api": [{"api_name": "picamera.PiCamera", "line_number": 21, "usage_type": "call"}, {"api_name": "picar.setup", "line_number": 22, "usage_type": "call"}, {"api_name": "Pyro4.Proxy", "line_number": 32, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 49, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 53, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 69, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 72, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 72, "usage_type": "name"}, {"api_name": "pickle.dumps", "line_number": 73, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 76, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 82, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 87, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 91, "usage_type": "call"}]}
+{"seq_id": "71542306795", "text": "import utils as ut\n\ndef parse1(line):\n\treturn line\n\ndef day05p1():\n print('day 05 part 1')\n lines = ut.get_file('day05_input.txt', parse1)\n max_seat_id = 0\n for line in lines:\n # s = ''\n # for letter in line[:-3]:\n # if letter == 'F': s += '0'\n # if letter == 'B': s += '1'\n # row_num = int(s, 2)\n #\n # s = ''\n # for letter in line[-3:]:\n # if letter == 'L': s += '0'\n # if letter == 'R': s += '1'\n # col_num = int(s, 2)\n #\n # max_seat_id = max(max_seat_id, row_num * 8 + col_num)\n\n mapping = {\n 'F': '0',\n 'B': '1',\n 'L': '0',\n 'R': '1'\n }\n for k,v in mapping.items():\n line = line.replace(k,v)\n\n max_seat_id = max(max_seat_id, int(line, 2))\n return max_seat_id\n\nprint(day05p1()) # 7:35\n\ndef parse2(line):\n\treturn line\n\ndef day05p2():\n print('day 05 part 2')\n lines = ut.get_file('day05_input.txt', parse2)\n max_seat_id = 0\n seat_id_list = []\n for line in lines:\n s = ''\n for letter in line[:-3]:\n if letter == 'F': s += '0'\n if letter == 'B': s += '1'\n row_num = int(s, 2)\n s = ''\n for letter in line[-3:]:\n if letter == 'L': s += '0'\n if letter == 'R': s += '1'\n col_num = int(s, 2)\n seat_id = row_num * 8 + col_num\n seat_id_list.append(seat_id)\n # max_seat_id = max(max_seat_id, row_num * 8 + col_num)\n seat_id_list.sort()\n\n seat_id = seat_id_list[0]\n for val in seat_id_list:\n if val == seat_id:\n seat_id += 1\n else:\n print(val)\n seat_id = val+1\n\n\n return seat_id_list\n\nprint(day05p2()) # 12:34 cumulative\n", "repo_name": "yufengg/adventofcode", "sub_path": "day05.py", "file_name": "day05.py", "file_ext": "py", "file_size_in_byte": 1797, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "73", "api": [{"api_name": "utils.get_file", "line_number": 8, "usage_type": "call"}, {"api_name": "utils.get_file", "line_number": 44, "usage_type": "call"}]}
+{"seq_id": "35382922303", "text": "import logging\nfrom typing import List\n\nimport hydra\nimport torch\nfrom omegaconf import DictConfig\nfrom torch import nn\nfrom torch.nn import MSELoss\n\nfrom ..utils import (add_four_fourvectors_squared, add_fourvectors,\n add_fourvectors_squared, init_transforms)\n\n\nclass WeightedMSELoss(nn.Module):\n def __init__(self, weights):\n super().__init__()\n self.weights = weights\n\n def forward(self, pred, targets):\n diff_sq = (pred - targets) ** 2 / targets.shape[0]\n return torch.sum(self.weights * diff_sq)\n\n\nclass HiggsLoss(nn.Module):\n def __init__(self, targets: List[str], alphas: List[float] = None, use_square=True, output_mean=None, output_std=None, target_mean=None, target_std=None):\n super().__init__()\n self.targets = targets\n self.use_square = use_square\n _, self.output_transform, self.target_transform = init_transforms(\n fit_transforms=False, output_mean=output_mean, output_std=output_std, target_mean=target_mean, target_std=target_std)\n if alphas is not None:\n self.loss = WeightedMSELoss(alphas)\n else:\n self.loss = MSELoss()\n\n def forward(self, outputs: torch.Tensor, targets: torch.Tensor, attributes: torch.Tensor):\n self.output_transform.to(outputs.device)\n self.target_transform.to(outputs.device)\n outputs = self.output_transform.inverse_transform(outputs)\n\n if self.use_square:\n Nbx_pred, Nby_pred, Wax_pred, Way_pred, Waz_pred, Wam_squared_pred, Wbx_pred, Wby_pred, Wbz_pred, Wbm_squared_pred, Hx_pred, Hy_pred, Hz_pred, Hm_squared_pred = calc_tree_squared(\n outputs, attributes)\n pred_map = {'Na_Genx': outputs[:, 0], 'Na_Geny': outputs[:, 1], 'Na_Genz': outputs[:, 2], 'Nb_Genx': Nbx_pred,\n 'Nb_Geny': Nby_pred, 'Nb_Genz': outputs[:, 3], 'Wa_Genx': Wax_pred, 'Wa_Geny': Way_pred, 'Wa_Genz': Waz_pred, 'Wa_Genm_squared': Wam_squared_pred, 'Wb_Genx': Wbx_pred, 'Wb_Geny': Wby_pred, 'Wb_Genz': Wbz_pred, 'Wb_Genm_squared': Wbm_squared_pred, 'H_Genx': Hx_pred, 'H_Geny': Hy_pred, 'H_Genz': Hz_pred, 'H_Genm_squared': Hm_squared_pred}\n else:\n Nbx_pred, Nby_pred, Wax_pred, Way_pred, Waz_pred, Wam_pred, Wbx_pred, Wby_pred, Wbz_pred, Wbm_pred, Hx_pred, Hy_pred, Hz_pred, Hm_pred = calc_tree(\n outputs, attributes)\n pred_map = {'Na_Genx': outputs[:, 0], 'Na_Geny': outputs[:, 1], 'Na_Genz': outputs[:, 2], 'Nb_Genx': Nbx_pred,\n 'Nb_Geny': Nby_pred, 'Nb_Genz': outputs[:, 3], 'Wa_Genx': Wax_pred, 'Wa_Geny': Way_pred, 'Wa_Genz': Waz_pred, 'Wa_Genm': Wam_pred, 'Wb_Genx': Wbx_pred, 'Wb_Geny': Wby_pred, 'Wb_Genz': Wbz_pred, 'Wb_Genm': Wbm_pred, 'H_Genx': Hx_pred, 'H_Geny': Hy_pred, 'H_Genz': Hz_pred, 'H_Genm': Hm_pred}\n\n pred = torch.stack([pred_map[t] for t in self.targets], dim=1)\n pred = self.target_transform(pred)\n return self.loss(pred, targets)\n\n\ndef calc_tree(outputs: torch.Tensor, attributes: torch.Tensor):\n # Depends on order of outputs and attributes in dataset config.\n METx = attributes[:, 0]\n METy = attributes[:, 1]\n Lax_vis = attributes[:, 2]\n Lay_vis = attributes[:, 3]\n Laz_vis = attributes[:, 4]\n Lam_vis = attributes[:, 5]\n Lbx_vis = attributes[:, 6]\n Lby_vis = attributes[:, 7]\n Lbz_vis = attributes[:, 8]\n Lbm_vis = attributes[:, 9]\n Nax_pred = outputs[:, 0]\n Nay_pred = outputs[:, 1]\n Naz_pred = outputs[:, 2]\n Nam_pred = torch.zeros_like(Nax_pred) # Approximate 0 neutrino mass.\n Nbx_pred = METx - Nax_pred\n Nby_pred = METy - Nay_pred\n Nbz_pred = outputs[:, 3]\n Nbm_pred = torch.zeros_like(Nbx_pred) # Approximate 0 neutrino mass.\n Wax_pred, Way_pred, Waz_pred, Wam_pred = add_fourvectors(\n 
Nax_pred, Nay_pred, Naz_pred, Nam_pred, Lax_vis, Lay_vis, Laz_vis, Lam_vis)\n    Wbx_pred, Wby_pred, Wbz_pred, Wbm_pred = add_fourvectors(\n        Nbx_pred, Nby_pred, Nbz_pred, Nbm_pred, Lbx_vis, Lby_vis, Lbz_vis, Lbm_vis)\n    Hx_pred, Hy_pred, Hz_pred, Hm_pred = add_fourvectors(Wax_pred, Way_pred, Waz_pred, Wam_pred,\n                                                         Wbx_pred, Wby_pred, Wbz_pred, Wbm_pred)\n    return Nbx_pred, Nby_pred, Wax_pred, Way_pred, Waz_pred, Wam_pred, Wbx_pred, Wby_pred, Wbz_pred, Wbm_pred, Hx_pred, Hy_pred, Hz_pred, Hm_pred\n\n\ndef calc_tree_squared(outputs: torch.Tensor, attributes: torch.Tensor):\n    # Depends on order of outputs and attributes in dataset config.\n    METx = attributes[:, 0]\n    METy = attributes[:, 1]\n    Lax_vis = attributes[:, 2]\n    Lay_vis = attributes[:, 3]\n    Laz_vis = attributes[:, 4]\n    LaE_vis = attributes[:, 5]\n    Lbx_vis = attributes[:, 6]\n    Lby_vis = attributes[:, 7]\n    Lbz_vis = attributes[:, 8]\n    LbE_vis = attributes[:, 9]\n    Nax_pred = outputs[:, 0]\n    Nay_pred = outputs[:, 1]\n    Naz_pred = outputs[:, 2]\n    NaE_pred = (Nax_pred**2 + Nay_pred**2 + Naz_pred**2)**0.5\n    Nbx_pred = METx - Nax_pred\n    Nby_pred = METy - Nay_pred\n    Nbz_pred = outputs[:, 3]\n    NbE_pred = (Nbx_pred**2 + Nby_pred**2 + Nbz_pred**2)**0.5\n    Wax_pred, Way_pred, Waz_pred, Wam_squared_pred = add_fourvectors_squared(\n        Nax_pred, Nay_pred, Naz_pred, NaE_pred, Lax_vis, Lay_vis, Laz_vis, LaE_vis)\n    Wbx_pred, Wby_pred, Wbz_pred, Wbm_squared_pred = add_fourvectors_squared(\n        Nbx_pred, Nby_pred, Nbz_pred, NbE_pred, Lbx_vis, Lby_vis, Lbz_vis, LbE_vis)\n    Hx_pred, Hy_pred, Hz_pred, Hm_squared_pred = add_four_fourvectors_squared(\n        Nax_pred, Nay_pred, Naz_pred, NaE_pred, Lax_vis, Lay_vis, Laz_vis, LaE_vis, Nbx_pred, Nby_pred, Nbz_pred, NbE_pred, Lbx_vis, Lby_vis, Lbz_vis, LbE_vis)\n    return Nbx_pred, Nby_pred, Wax_pred, Way_pred, Waz_pred, Wam_squared_pred, Wbx_pred, Wby_pred, Wbz_pred, Wbm_squared_pred, Hx_pred, Hy_pred, Hz_pred, Hm_squared_pred\n\n\n@hydra.main(config_path=\"../../configs\", config_name=\"config\")\ndef main(cfg: DictConfig):\n    feature_transform, output_transform, target_transform = hydra.utils.instantiate(\n        cfg.transforms)\n    datamodule = hydra.utils.instantiate(\n        cfg.dataset, targets=cfg.dataset_criterion.targets, feature_transform=feature_transform, output_transform=output_transform, target_transform=target_transform)\n    criterion = HiggsLoss(cfg.dataset_criterion.targets, alphas=None, output_mean=output_transform.mean,\n                          output_std=output_transform.std, target_mean=target_transform.mean, target_std=target_transform.std)\n    dataloader = datamodule.train_dataloader()\n    loss = 0.0\n    for batch in dataloader:\n        features, outputs, targets, attributes = batch\n        loss += criterion(outputs, targets, attributes)\n\n    logging.info(f'Expected loss: 0.0. 
Actual loss: {loss}')\n\n\nif __name__ == \"__main__\":\n main() # pylint: disable=no-value-for-parameter\n", "repo_name": "AlexSchuy/mass_regression", "sub_path": "mass_regression/criterion/higgs.py", "file_name": "higgs.py", "file_ext": "py", "file_size_in_byte": 6792, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "73", "api": [{"api_name": "torch.nn.Module", "line_number": 14, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 14, "usage_type": "name"}, {"api_name": "torch.sum", "line_number": 21, "usage_type": "call"}, {"api_name": "torch.nn.Module", "line_number": 24, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 24, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 25, "usage_type": "name"}, {"api_name": "utils.init_transforms", "line_number": 29, "usage_type": "call"}, {"api_name": "torch.nn.MSELoss", "line_number": 34, "usage_type": "call"}, {"api_name": "torch.Tensor", "line_number": 36, "usage_type": "attribute"}, {"api_name": "torch.stack", "line_number": 52, "usage_type": "call"}, {"api_name": "torch.Tensor", "line_number": 57, "usage_type": "attribute"}, {"api_name": "torch.zeros_like", "line_number": 72, "usage_type": "call"}, {"api_name": "torch.zeros_like", "line_number": 76, "usage_type": "call"}, {"api_name": "utils.add_fourvectors", "line_number": 77, "usage_type": "call"}, {"api_name": "utils.add_fourvectors", "line_number": 79, "usage_type": "call"}, {"api_name": "utils.add_fourvectors", "line_number": 81, "usage_type": "call"}, {"api_name": "torch.Tensor", "line_number": 86, "usage_type": "attribute"}, {"api_name": "utils.add_fourvectors_squared", "line_number": 106, "usage_type": "call"}, {"api_name": "utils.add_fourvectors_squared", "line_number": 108, "usage_type": "call"}, {"api_name": "utils.add_four_fourvectors_squared", "line_number": 110, "usage_type": "call"}, {"api_name": "omegaconf.DictConfig", "line_number": 116, "usage_type": "name"}, {"api_name": "hydra.utils.instantiate", "line_number": 117, "usage_type": "call"}, {"api_name": "hydra.utils", "line_number": 117, "usage_type": "attribute"}, {"api_name": "hydra.utils.instantiate", "line_number": 119, "usage_type": "call"}, {"api_name": "hydra.utils", "line_number": 119, "usage_type": "attribute"}, {"api_name": "logging.info", "line_number": 129, "usage_type": "call"}, {"api_name": "hydra.main", "line_number": 115, "usage_type": "call"}]}
+{"seq_id": "20278125768", "text": "\nimport mne\nimport pandas as pd\nimport matplotlib\nimport matplotlib.pyplot as plt\nmatplotlib.use('Agg')\nimport numpy as np\nimport os\n\n\n#%matplotlib qt\n \n#USER_LIST = 'Res/Users.npy'\n#DB_PATH = 'Res/dataset.npy'\n\ndef SetPaths():\n global USER_LIST\n global DB_PATH\n global USER_PLOT_PATH \n dirname = os.path.dirname(__file__) ### This defines dirctory path\n USER_LIST = os.path.join(dirname, 'Res/Users.npy') ### this puts absolute path in filename\n DB_PATH = os.path.join(dirname, 'Res/dataset.npy') \n USER_PLOT_PATH = os.path.join(dirname,'Res/Plots')\n \n\n\n\ndef createDir(s):\n SetPaths()\n global PLOT_PATH\n PLOT_PATH = os.path.join(USER_PLOT_PATH,s)\n os.mkdir(PLOT_PATH)\n\n\ndef Input(fname):\n SetPaths()\n raw = mne.io.read_raw_edf(fname,preload=True)\n fig = raw.plot()\n fig.savefig(PLOT_PATH+'/'+'raw-data.png')\n fig = raw.plot_psd()\n #print(USER_PLOT_PATH)\n #print(USER_PLOT_PATH+'/'+'power_spectral_density.png')\n fig.savefig(PLOT_PATH+'/'+'power_spectral_density.png')\n return raw\n \n \n\ndef SetMontage(raw):\n SetPaths()\n raw.filter(None, 50., h_trans_bandwidth='auto', filter_length='auto',\n phase='zero')\n for x in raw.ch_names:\n str = x.replace(\".\", \"\")\n raw.rename_channels(mapping={x:str})\n raw.filter(1, 40, n_jobs=2) \n montage = mne.channels.make_standard_montage('standard_1005')\n fig = montage.plot()\n fig.savefig(PLOT_PATH+'/'+'Montage.png')\n raw.set_montage(montage,match_case=False)\n\n \n\ndef ApplyPCA(raw,n): \n SetPaths()\n dictionary = {\"T2\" : 100}\n eves = mne.events_from_annotations(raw,dictionary)\n events = eves[0] \n events_ids = {\"target/stimulus\":100}\n epochs = mne.Epochs(raw,events,event_id=events_ids,preload=True)\n fig = epochs.plot()\n fig.savefig(PLOT_PATH+'/'+'raw_epochs.png')\n fig = epochs.plot_psd()\n fig.savefig(PLOT_PATH+'/'+'epochs_psd.png') \n from mne.decoding import UnsupervisedSpatialFilter\n from sklearn.decomposition import PCA\n X = epochs.get_data()\n pca = UnsupervisedSpatialFilter(PCA(n), average=False)\n pca_data = pca.fit_transform(X)\n tmin, tmax = -0.1, 0.3\n ev = mne.EvokedArray(np.mean(pca_data, axis=0),\n mne.create_info(n,epochs.info['sfreq'],ch_types='eeg'),tmin=tmin)\n fig = ev.plot(show=False, window_title=\"PCA\", time_unit='s')\n fig.savefig(PLOT_PATH+'/'+'PCA_15_Channels.png')\n fig = ev.plot_image()\n fig.savefig(PLOT_PATH+'/'+'EvokedData_As_Image.png')\n \n epoch_avg = np.mean(pca_data, axis=0) \n return pca_data,epoch_avg\n\n\ndef ModifyDatabase(epochs,label):\n try:\n print(\"ModifyDatabase begins....\")\n SetPaths()\n X = epochs\n y = label\n db = np.load(DB_PATH,allow_pickle='TRUE')\n flatX = X.flatten()\n flatX = np.append(flatX,y)\n db = np.append(db,[flatX],axis=0)\n np.save(DB_PATH,db) \n print(\"ModifyDatabase successful\")\n #db.close()\n except Exception as e:\n print(e)\n print(\"ModifyDB haga\")\n \n\n \n\n#For Model\ndef PredInput(fname):\n raw = mne.io.read_raw_edf(fname,preload=True)\n return raw\n \n \n\ndef PredSetMontage(raw):\n raw.filter(None, 50., h_trans_bandwidth='auto', filter_length='auto',\n phase='zero')\n for x in raw.ch_names:\n str = x.replace(\".\", \"\")\n raw.rename_channels(mapping={x:str})\n raw.filter(1, 40, n_jobs=2) \n montage = mne.channels.make_standard_montage('standard_1005')\n raw.set_montage(montage,match_case=False)\n\n \n\ndef PredApplyPCA(raw,n): \n dictionary = {\"T2\" : 100}\n eves = mne.events_from_annotations(raw,dictionary)\n events = eves[0] \n events_ids = {\"target/stimulus\":100}\n epochs = 
mne.Epochs(raw,events,event_id=events_ids,preload=True)\n from mne.decoding import UnsupervisedSpatialFilter\n from sklearn.decomposition import PCA\n X = epochs.get_data()\n pca = UnsupervisedSpatialFilter(PCA(n), average=False)\n pca_data = pca.fit_transform(X)\n epoch_avg = np.mean(pca_data, axis=0) \n return pca_data,epoch_avg\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "repo_name": "jtn-b/Mind-ID", "sub_path": "BackEnd/processing.py", "file_name": "processing.py", "file_ext": "py", "file_size_in_byte": 4141, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "73", "api": [{"api_name": "matplotlib.use", "line_number": 6, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 20, "usage_type": "call"}, {"api_name": "os.path", "line_number": 20, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 21, "usage_type": "call"}, {"api_name": "os.path", "line_number": 21, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 22, "usage_type": "call"}, {"api_name": "os.path", "line_number": 22, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 23, "usage_type": "call"}, {"api_name": "os.path", "line_number": 23, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 31, "usage_type": "call"}, {"api_name": "os.path", "line_number": 31, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 32, "usage_type": "call"}, {"api_name": "mne.io.read_raw_edf", "line_number": 37, "usage_type": "call"}, {"api_name": "mne.io", "line_number": 37, "usage_type": "attribute"}, {"api_name": "mne.channels.make_standard_montage", "line_number": 56, "usage_type": "call"}, {"api_name": "mne.channels", "line_number": 56, "usage_type": "attribute"}, {"api_name": "mne.events_from_annotations", "line_number": 66, "usage_type": "call"}, {"api_name": "mne.Epochs", "line_number": 69, "usage_type": "call"}, {"api_name": "mne.decoding.UnsupervisedSpatialFilter", "line_number": 77, "usage_type": "call"}, {"api_name": "sklearn.decomposition.PCA", "line_number": 77, "usage_type": "call"}, {"api_name": "mne.EvokedArray", "line_number": 80, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 80, "usage_type": "call"}, {"api_name": "mne.create_info", "line_number": 81, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 87, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 97, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 99, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 100, "usage_type": "call"}, {"api_name": "numpy.save", "line_number": 101, "usage_type": "call"}, {"api_name": "mne.io.read_raw_edf", "line_number": 113, "usage_type": "call"}, {"api_name": "mne.io", "line_number": 113, "usage_type": "attribute"}, {"api_name": "mne.channels.make_standard_montage", "line_number": 125, "usage_type": "call"}, {"api_name": "mne.channels", "line_number": 125, "usage_type": "attribute"}, {"api_name": "mne.events_from_annotations", "line_number": 132, "usage_type": "call"}, {"api_name": "mne.Epochs", "line_number": 135, "usage_type": "call"}, {"api_name": "mne.decoding.UnsupervisedSpatialFilter", "line_number": 139, "usage_type": "call"}, {"api_name": "sklearn.decomposition.PCA", "line_number": 139, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 141, "usage_type": "call"}]}
+{"seq_id": "35831638511", "text": "# encoding = 'utf-8'\nfrom keras.models import Sequential, Model\nfrom keras.callbacks import ModelCheckpoint\nfrom keras.layers import Input, LSTM, Dense, Embedding, RepeatVector, TimeDistributed, Dropout, LeakyReLU\nfrom bert_serving.client import BertClient\nfrom data_loader import Loader\nimport numpy as np\nimport re\nimport random\nimport json\n\nCORPUS_PATH = 'data\\\\證券交易法標記輸出檔_v1'\n\nbc = BertClient()\nprint('bert is ready')\ndl = Loader()\ndl.load_corpus(CORPUS_PATH)\n\n\ndef baseline_model(category_out):\n # create model\n model = Sequential()\n model.add(Dense(768, input_dim=768, activation='relu'))\n model.add(Dropout(0.5))\n model.add(Dense(256, input_dim=768, activation='tanh'))\n model.add(Dropout(0.5))\n model.add(Dense(category_out, activation='sigmoid'))\n # Compile model\n model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n return model\n\n\n# prepare training data\ndef generate_sentence2pos_dict(text):\n dic = {}\n idx = 0\n for i in range(len(text)):\n if text[i] == ',' or text[i] == '。':\n idx += 1\n dic[i] = idx\n return dic\n\n\ndef generate_meaning_label(label, dic):\n tmp = {}\n for l in label:\n if dic[l['start_offset']] == dic[l['end_offset']-1]:\n tmp[dic[l['start_offset']]] = l['label']\n else:\n for i in range(dic[l['start_offset']], dic[l['end_offset']-1]+1):\n tmp[i] = l['label']\n return tmp\n\n\ndef one_hot_label(ids, dic):\n ret = []\n for id in ids:\n res = [0 for _ in range(len(dic))]\n res[dic[id]] = 1\n ret.append(res)\n return ret\n\n\ntraining_data = []\n# label_data = []\n# dl.text = [dl.text[0]]\nfor i in range(len(dl.text)):\n text = dl.text[i]\n label = dl.label[i]\n sentences = re.split('[,。]', text)\n dic = generate_sentence2pos_dict(text)\n l_dic = generate_meaning_label(label, dic)\n for idx, s in enumerate(sentences):\n if(s):\n if idx in l_dic:\n training_data.append([s, l_dic[idx]])\n # label_data.append(l_dic[idx])\n else:\n training_data.append([s, 0])\n # label_data.append(0) # no label\n\nlabel2id = {}\nid = 0\nfor l in (training_data):\n if l[1] not in label2id:\n label2id[l[1]] = id\n id += 1\n else:\n pass\n\nwith open('label2id.txt', 'w', encoding='utf-8') as f:\n for k in label2id.keys():\n f.write('{} : {}\\n'.format(label2id[k], k))\n\n# encode first\ntmp_data = bc.encode([data[0] for data in training_data])\nfor data, encode in zip(training_data, tmp_data):\n # print(bc.encode([data[0]]))\n data[0] = encode\n\n# write training_data\nwith open('training_data', 'w', encoding='utf-8') as f:\n j = {}\n j['data'] = []\n for d in training_data:\n tmp = {}\n tmp['code'] = d[0].tolist()\n tmp['label'] = d[1]\n j['data'].append(tmp)\n json.dump(j, f)\n\n# show stat\nstat_dict = {}\nfor d in training_data:\n if d[1] in stat_dict:\n stat_dict[d[1]] += 1\n else:\n stat_dict[d[1]] = 1\nfor k in stat_dict:\n print('{} {}'.format(k, stat_dict[k]))\n\n# save label\n# label2id = {}\n# for id, l in enumerate(set(training_data)):\n# label2id[l[1]] = id\n# with open('label2id.txt', 'w', encoding='utf-8') as f:\n# for k in label2id.keys():\n# f.write('{} : {}\\n'.format(label2id[k], k))\n#\n# label_count = {}\n# for l in training_data:\n# if l[1] in label_count:\n# label_count[l[1]] += 1\n# else:\n# label_count[l[1]] = 1\n# print(label_count)\n# max_num = max([label_count[k] for k in label_count.keys()])\n#\n#\n# using bert to generate article vector and use simple NN to predict the law category\n# print('start bert encoding')\n# t_X = 
bc.encode(training_data).tolist()\n#\n# balance data by copy data randomly for specific label until number is equal\n# =====\n# tmp_X = []\n# tmp_Y = []\n#\n# label2data= {}\n# tmp_label2data = {}\n#\n#\n# def random_generate(data, num):\n# res = []\n# for _ in range(num):\n# res.append(data[random.randint(0, len(data)-1)])\n# return res\n#\n#\n# for i, x in enumerate(t_X):\n# if training_data[i][1] in label2data:\n# label2data[training_data[i][1]] += [x]\n# else:\n# label2data[training_data[i][1]] = [x]\n# for k in label2data.keys():\n# tmp_X += random_generate(label2data[k], max_num)\n# tmp_Y += [k for _ in range(max_num)]\n# tmp_Y = one_hot_label(tmp_Y, label2id)\n# train_X = np.asarray(tmp_X)\n# train_Y = np.asarray(tmp_Y)\n# print(train_X.shape)\n# print(train_Y.shape)\n\n\n# ====\n# class DataGenerator(object):\n# def __init__(self, rescale=None):\n# self.train = []\n# self.target = []\n# self.train_sentences = []\n# self.reset()\n#\n# def reset(self):\n# self.train = []\n# self.target = []\n# self.train_sentences = []\n#\n# def flow_from_directory(self, data, label2id, batch_size=32):\n# input_data = np.zeros(\n# (batch_size, 768),\n# dtype='float32')\n# target_data = np.zeros(\n# (batch_size, len(label2id)),\n# dtype='float32')\n# while True:\n# time = 0\n# for i, d in enumerate(data):\n# input_data[time] = d[0]\n# # self.train_sentences.append(d[0])\n# target_data[time, label2id[d[1]]] = 1.\n# time += 1\n# # print(input_data)\n# # print(target_data)\n# if time == batch_size:\n# self.train = input_data\n# self.target = target_data\n# inputs = np.asarray(self.train, dtype='float32')\n# targets = np.asarray(self.target, dtype='float32')\n# self.reset()\n# time = 0\n# # print('in: '+str(inputs[0]))\n# # print('out: '+str(targets[0]))\n# yield inputs, targets\n# self.train = input_data\n# self.target = target_data\n# inputs = np.asarray(self.train, dtype='float32')\n# targets = np.asarray(self.target, dtype='float32')\n# self.reset()\n# yield inputs, targets\n\n\n# EPOCHS = 20\n# BATCH = 128\n# STEPS_PER_EPOCH = (len(training_data)/BATCH)\n# datagen = DataGenerator()\n# model = baseline_model(len(label2id))\n# model.summary()\n# # cp = ModelCheckpoint('law2fact.h', monitor='val_loss', save_best_only=True)\n# # model.fit(train_X, train_Y, epochs=20, batch_size=32, shuffle=True)\n# model.fit_generator(\n# generator=datagen.flow_from_directory(training_data, label2id, BATCH),\n# epochs=EPOCHS,\n# steps_per_epoch=STEPS_PER_EPOCH,\n# )\n#\n# model.save('law2fact.h')", "repo_name": "huangcap/Law2Fact", "sub_path": "train.py", "file_name": "train.py", "file_ext": "py", "file_size_in_byte": 6785, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "73", "api": [{"api_name": "bert_serving.client.BertClient", "line_number": 14, "usage_type": "call"}, {"api_name": "data_loader.Loader", "line_number": 16, "usage_type": "call"}, {"api_name": "keras.models.Sequential", "line_number": 22, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 23, "usage_type": "call"}, {"api_name": "keras.layers.Dropout", "line_number": 24, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 25, "usage_type": "call"}, {"api_name": "keras.layers.Dropout", "line_number": 26, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 27, "usage_type": "call"}, {"api_name": "re.split", "line_number": 70, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 110, "usage_type": "call"}]}
+{"seq_id": "16059277644", "text": "from aiogram.types import ReplyKeyboardMarkup, KeyboardButton, WebAppInfo, ReplyKeyboardRemove\n\nweb_app_markup = ReplyKeyboardMarkup(\n row_width=1,\n resize_keyboard=True,\n one_time_keyboard=True,\n keyboard=[[KeyboardButton(text='Выбор персонажа', web_app=WebAppInfo(url='https://ewynona.github.io/'))]]\n)\n\nremove_markup = ReplyKeyboardRemove()\n", "repo_name": "ewynona/telegram_ai", "sub_path": "app/telegram_ai/bot/keyboards.py", "file_name": "keyboards.py", "file_ext": "py", "file_size_in_byte": 370, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "73", "api": [{"api_name": "aiogram.types.ReplyKeyboardMarkup", "line_number": 3, "usage_type": "call"}, {"api_name": "aiogram.types.KeyboardButton", "line_number": 7, "usage_type": "call"}, {"api_name": "aiogram.types.WebAppInfo", "line_number": 7, "usage_type": "call"}, {"api_name": "aiogram.types.ReplyKeyboardRemove", "line_number": 10, "usage_type": "call"}]}
+{"seq_id": "42861934607", "text": "# !/usr/bin/env python3\r\n# -*- coding: utf-8 -*-\r\n\r\nimport sys\r\nfrom datetime import datetime\r\n\r\n\r\ndef get_worker():\r\n surname = input(\"Фамилия: \")\r\n name = input(\"Имя: \")\r\n zodiac = input(\"Знак зодиака: \")\r\n date = input(\"Дата: \")\r\n\r\n return {\r\n 'surname': surname,\r\n 'name': name,\r\n 'zodiac': zodiac,\r\n 'date': datetime.strptime(date, \"%Y-%m-%d\")\r\n }\r\n\r\n\r\ndef display_workers(staff):\r\n if staff:\r\n\r\n line = '+-{}-+-{}-+-{}-+-{}-+-{}-+'.format(\r\n '-' * 4,\r\n '-' * 30,\r\n '-' * 20,\r\n '-' * 15,\r\n '-' * 15\r\n )\r\n print(line)\r\n print(\r\n '| {:^4} | {:^30} | {:^20} | {:^15} | {:^15} |'.format(\r\n \"№\",\r\n \"Фамилия\",\r\n \"Имя\",\r\n \"Знак зодиака\",\r\n \"Дат�� рождения\"\r\n )\r\n )\r\n print(line)\r\n\r\n # Вывести данные о всех сотрудниках.\r\n for idx, worker in enumerate(staff, 1):\r\n print(\r\n '| {:^4} | {:^30} | {:^20} | {:^15} | {:^15} |'.format(\r\n idx,\r\n worker.get('surname', ''),\r\n worker.get('name', ''),\r\n worker.get('zodiac', ''),\r\n str(worker.get('date', '').date())\r\n )\r\n )\r\n print(line)\r\n\r\n else:\r\n print(\"Список пуст.\")\r\n\r\n\r\ndef select_workers(staff):\r\n month1 = int(input(\"Введите месяц: \"))\r\n result = []\r\n for worker in staff:\r\n if worker.get('date', '').month == month1:\r\n result.append(worker)\r\n return result\r\n\r\n\r\ndef main():\r\n workers = []\r\n\r\n while True:\r\n\r\n command = input(\">>> \").lower()\r\n\r\n if command == 'exit':\r\n break\r\n\r\n elif command == 'add':\r\n worker = get_worker()\r\n\r\n workers.append(worker)\r\n if len(workers) > 1:\r\n workers.sort(key=lambda item: item.get('name', ''))\r\n\r\n elif command == 'list':\r\n display_workers(workers)\r\n\r\n elif command.startswith('select'):\r\n selected = select_workers(workers)\r\n display_workers(selected)\r\n\r\n elif command == 'help':\r\n print(\"Список команд:\\n\")\r\n print(\"add - добавить запись;\")\r\n print(\"list - вывести список;\")\r\n print(\"select - список родившихся в один месяц;\")\r\n print(\"help - отобразить справку;\")\r\n print(\"exit - завершить работу с программой.\")\r\n\r\n else:\r\n print(\"\")\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n", "repo_name": "tamaranesterenko/Python.LR_11", "sub_path": "IDZ.py", "file_name": "IDZ.py", "file_ext": "py", "file_size_in_byte": 2860, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "73", "api": [{"api_name": "datetime.datetime.strptime", "line_number": 18, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 18, "usage_type": "name"}]}
+{"seq_id": "70819564717", "text": "import json as Json\n\nfrom xml_provider.xml_provider import XmlProvider\n\n\nclass DataFilesToRunConvertor:\n\n def __init__(self):\n self._import_path = None\n self._isa_json = None\n self._current_data_file = None\n self._data_files = None\n\n self.experiment_reference = None\n pass\n\n\n def get_data_files(self):\n data_files = []\n studies = self._isa_json[\"investigation\"][\"studies\"]\n for study in studies:\n assays = study[\"assays\"]\n for assay in assays:\n data_files.extend(assay[\"dataFiles\"])\n \n return data_files\n\n\n @property\n def import_path(self):\n return self._import_path\n\n @import_path.setter\n def import_path(self, path):\n self._import_path = path\n with open(path) as isa_file:\n self._isa_json = Json.load(isa_file)\n self._data_files = self.get_data_files()\n pass\n\n\n def validate(self):\n if self._is_empty(self.experiment_reference):\n raise ValueError(\"Experiment reference number is needed in dataFiles\")\n\n for data_file in self._data_files:\n if self._is_empty(data_file[\"name\"]):\n raise ValueError(\"name is needed in dataFiles\")\n \n requirment_comments = [\"file checksum\"]\n not_fulfilled_requirement_comments = requirment_comments.copy()\n for comment in data_file[\"comments\"]:\n key = comment[\"name\"]\n value = comment[\"value\"]\n if key in requirment_comments and not self._is_empty(value):\n not_fulfilled_requirement_comments.remove(key)\n if len(not_fulfilled_requirement_comments) > 0:\n raise ValueError(f\"{not_fulfilled_requirement_comments} is/are needed in dataFiles comments\")\n pass\n\n\n def convert(self):\n self.validate()\n\n run_set = self.run_set_xml()\n for data_file in self._data_files:\n self._current_data_file = data_file\n run_set.add(\n self.run_xml() \\\n .add(self.title_xml()) \\\n .add(self.experiment_ref_xml()) \\\n .add(\n self.data_block_xml() \\\n .add(self.files_xml())\n ) \n ) \n\n return run_set\n\n\n def run_set_xml(self):\n run_set = XmlProvider(\"RUN_SET\")\n\n return run_set\n\n\n def run_xml(self):\n run = XmlProvider(\"RUN\")\n run.set_attribute(\"alias\", self._current_data_file[\"@id\"])\n\n return run\n\n\n def title_xml(self):\n title = XmlProvider(\"TITLE\")\n title.set_text(self._current_data_file[\"name\"])\n\n return title\n\n\n def experiment_ref_xml(self):\n experiment_ref = XmlProvider(\"EXPERIMENT_REF\")\n experiment_ref.set_attribute(\"refname\", self.experiment_reference)\n\n return experiment_ref\n\n\n def data_block_xml(self):\n data_block = XmlProvider(\"DATA_BLOCK\")\n\n return data_block\n\n\n def files_xml(self):\n files = XmlProvider(\"FILES\")\n\n file = XmlProvider(\"FILE\")\n file.parent = files\n file.set_attribute(\"filename\", self._current_data_file[\"name\"])\n for comment in self._current_data_file[\"comments\"]:\n name = comment[\"name\"]\n value = comment[\"value\"]\n if name == \"file type\":\n file.set_attribute(\"filetype\", value)\n elif name == \"file checksum\":\n file.set_attribute(\"checksum\", value)\n\n return files\n\n\n def _is_empty(self, text: str):\n return not (text and text.strip())", "repo_name": "elixir-europe/biohackathon-projects-2022", "sub_path": "27/isa_ena/converter/data_files.py", "file_name": "data_files.py", "file_ext": "py", "file_size_in_byte": 3715, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 19, "dataset": "github-code", "pt": "73", "api": [{"api_name": "json.load", "line_number": 37, "usage_type": "call"}, {"api_name": "xml_provider.xml_provider.XmlProvider", "line_number": 82, 
"usage_type": "call"}, {"api_name": "xml_provider.xml_provider.XmlProvider", "line_number": 88, "usage_type": "call"}, {"api_name": "xml_provider.xml_provider.XmlProvider", "line_number": 95, "usage_type": "call"}, {"api_name": "xml_provider.xml_provider.XmlProvider", "line_number": 102, "usage_type": "call"}, {"api_name": "xml_provider.xml_provider.XmlProvider", "line_number": 109, "usage_type": "call"}, {"api_name": "xml_provider.xml_provider.XmlProvider", "line_number": 115, "usage_type": "call"}, {"api_name": "xml_provider.xml_provider.XmlProvider", "line_number": 117, "usage_type": "call"}]}
+{"seq_id": "35118576007", "text": "import copy\nfrom collections import Counter\n\n\ndef input_file(filename):\n nodes = {}\n with open(filename, 'rt') as file:\n for line in file: # loop over each line\n line_string = (line.strip().split('-'))\n\n if line_string[0] in nodes:\n nodes[line_string[0]].append(line_string[1])\n else:\n nodes[line_string[0]] = []\n nodes[line_string[0]].append(line_string[1])\n\n if line_string[1] in nodes:\n nodes[line_string[1]].append(line_string[0])\n else:\n nodes[line_string[1]] = []\n nodes[line_string[1]].append(line_string[0])\n print(nodes)\n return nodes\n\n\ndef seek_paths(nodes, current_route, node_key, possible_routes):\n for node in nodes[node_key]:\n if node == 'start':\n continue\n if node == 'end':\n current_route.append(node)\n possible_routes.append(current_route)\n continue\n if (node.islower() and node not in current_route) or node.isupper():\n copied_current_route = copy.deepcopy(current_route)\n copied_current_route.append(node)\n seek_paths(nodes, copied_current_route, node, possible_routes)\n\n\ndef seek_paths_improved(nodes, current_route, node_key, possible_routes):\n for node in nodes[node_key]:\n counter = Counter(current_route)\n may_visit = small_cave_counter(dict(counter))\n if node == 'start':\n continue\n if node == 'end':\n current_route.append(node)\n possible_routes.append(current_route)\n continue\n if node.isupper():\n copy_and_continue(current_route, node, nodes, possible_routes)\n if node.islower():\n if node not in current_route:\n copy_and_continue(current_route, node, nodes, possible_routes)\n elif counter[node] < 2 and may_visit:\n copy_and_continue(current_route, node, nodes, possible_routes)\n\n\ndef copy_and_continue(current_route, node, nodes, possible_routes):\n copied_current_route = copy.deepcopy(current_route)\n copied_current_route.append(node)\n seek_paths_improved(nodes, copied_current_route, node, possible_routes)\n\n\ndef small_cave_counter(visited_amount):\n filtered_visited_amount_by_lower = {k: v for (k, v) in visited_amount.items() if k.islower() and (k != 'end' and k != 'start')}\n filtered_visited_amount_by_value = {k: v for (k, v) in filtered_visited_amount_by_lower.items() if v > 1}\n return True if len(filtered_visited_amount_by_value) == 0 else False\n\n\ndef part_one(nodes):\n possible_routes = []\n current_route = ['start']\n seek_paths(nodes, current_route, 'start', possible_routes)\n\n\ndef part_two(nodes):\n possible_routes = []\n current_route = ['start']\n seek_paths_improved(nodes, current_route, 'start', possible_routes)\n print(len(possible_routes))\n\n\ndef main():\n nodes = input_file('input.txt')\n part_one(nodes)\n part_two(nodes)\n\n\nmain()\n", "repo_name": "JunDP9/aoc", "sub_path": "day12/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 3043, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "73", "api": [{"api_name": "copy.deepcopy", "line_number": 35, "usage_type": "call"}, {"api_name": "collections.Counter", "line_number": 42, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 60, "usage_type": "call"}]}
+{"seq_id": "29829871996", "text": "\n\"\"\"\n\nPerfusion Slic demo\n\n\nRun demo:\n\n>>> python perfusion_slic_demo.py\n\nUsing QIN Breast data as an example. This data comes in matlab .mat format.\n\n\nTo load a 4D nifti image instead use:\n\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\nfile1 = \"dce_mri.nii\"\nimg = nib.load(file1)\n\n# Determining ratio of voxel sizes\nhdr = img.get_header()\nraw1 = hdr.structarr\npixdim = raw1['pixdim']\n\nvox_size = np.abs(np.around([pixdim[1], pixdim[2], pixdim[3]], 2))\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n#Image\nimg1 = np.array(img.get_data())\n\nAuthor: Benjamin Irving (20141124)\n\n\"\"\"\n\nfrom __future__ import division, print_function, absolute_import\n\nimport numpy as np\nimport nibabel as nib\nfrom time import time\nfrom perfusionslic import PerfSLIC\nimport h5py\nstart1 = time()\n\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Load Nifti data ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n# Folders\n\n# file1 = \"dce_mri.nii\"\n# img = nib.load(file1)\n#\n# # Determining ratio of voxel sizes\n# hdr = img.get_header()\n# raw1 = hdr.structarr\n# pixdim = raw1['pixdim']\n#\n# vox_size = np.abs(np.around([pixdim[1], pixdim[2], pixdim[3]], 2))\n#\n# #Image\n# img1 = np.array(img.get_data())\n\n# ~~~~~~~~~~~~~~~~~~ Load .mat data ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nf = h5py.File('QIN-Breast-DCE-MRI-BC10-V1.mat', 'r')\nimg = f.get('da/data')\nhdr = f.get('da/hdr')\nSliceThickness = hdr[\"SliceThickness\"][0]\nPixelSpacing = hdr[\"PixelSpacing\"][0]\nvox_size = np.around([PixelSpacing[0], PixelSpacing[1], SliceThickness[0]])\nimg1 = np.transpose(img, [3, 2, 1, 0])\n\n# Load reconstructed roi\n# f2 = h5py.File('QIN-Breast-DCE-MRI-BC10-V1roi.mat', 'r')\n# roi = f2.get('roi1')\n# roi1 = np.transpose(roi, [2, 1, 0])\n\n# Select a sub-region containing tumour (for speed and memory reasons)\n\nimg1 = img1[20:160, 35:180, :, :]\n# roi1 = roi1[20:160, 35:180, :]\n\n# ~~~~~~~~~~~~~~~~~~ Running Perfusion SLIC ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nprint(\"Initialise the perf slic class\")\n\nps1 = PerfSLIC(img1, vox_size)\n\nprint(\"Normalising image...\")\n\nps1.normalise_curves()\n\nprint(\"Extracting features...\")\n\nps1.feature_extraction(n_components=3)\n\nprint(\"Extracting supervoxels...\")\n\nsegments = ps1.supervoxel_extraction(compactness=0.02, segment_size=1000)\n\n# Plot the PCA modes\nps1.plot_pca_modes()\n\n# Plot a static version of the supervoxels\nps1.plotstatic()\n\n# plot a dynamic version of the image in the background\nps1.plotdynamic(img_slice=54, save_animation=True)\n\n# ~~~~~~~~~~~~~~~~~~~~~ Saving region ~~~~~~~~~~~~~~~~~~~~~~~~~~~\ntime_complete = time() - start1\n#\nprint(\"Saving a nifti version of the extracted segments \", end=\"\")\n\nfile1 = 'slic_regions.nii'\n\nrb1 = np.array(segments, dtype=np.int)\nimg = nib.Nifti1Image(rb1, np.eye(4))\nimg.update_header()\nimg.to_filename(file1)\n\n\nprint(\"Done\")\n\n\n\n\n", "repo_name": "benjaminirving/perfusion-slic", "sub_path": "perfusion_slic_demo.py", "file_name": "perfusion_slic_demo.py", "file_ext": "py", "file_size_in_byte": 2746, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 9, "dataset": "github-code", "pt": "73", "api": [{"api_name": "time.time", "line_number": 42, "usage_type": "call"}, {"api_name": "h5py.File", "line_number": 63, "usage_type": "call"}, {"api_name": "numpy.around", "line_number": 68, "usage_type": "call"}, {"api_name": "numpy.transpose", "line_number": 69, "usage_type": "call"}, {"api_name": "perfusionslic.PerfSLIC", 
"line_number": 85, "usage_type": "call"}, {"api_name": "time.time", "line_number": 109, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 115, "usage_type": "call"}, {"api_name": "numpy.int", "line_number": 115, "usage_type": "attribute"}, {"api_name": "nibabel.Nifti1Image", "line_number": 116, "usage_type": "call"}, {"api_name": "numpy.eye", "line_number": 116, "usage_type": "call"}]}
+{"seq_id": "41512063177", "text": "from django.urls import path\nfrom . import views\n\napp_name = 'resultApp'\n\nurlpatterns = [\n path('danger/', views.danger_result_view, name='danger_result'),\n path('safe/', views.safe_result_view, name='safe_result'),\n path('send_email/', views.send_email, name='send_email'),\n]", "repo_name": "oblsoun/VOVsnap", "sub_path": "resultApp/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 285, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "73", "api": [{"api_name": "django.urls.path", "line_number": 7, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 8, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 9, "usage_type": "call"}]}
+{"seq_id": "40538310000", "text": "import api, points, base64, math, codecs\nfrom api.bottle import *\nfrom shutil import copyfile\n\ndef get_page(n):\n return math.ceil(n / 50)\n\ndef echoes(subscription):\n allechoareas = []\n for echoarea in subscription:\n temp = echoarea\n if not request.get_cookie(echoarea[0]):\n response.set_cookie(echoarea[0], api.get_last_msgid(echoarea[0]), path=\"/\", max_age=180*24*60*60, secret='some-secret-key')\n current = request.get_cookie(echoarea[0], secret='some-secret-key')\n if not current:\n current = api.get_last_msgid(echoarea[0])\n echoarea_msglist = api.get_echoarea(echoarea[0])\n\n new = 0\n last = False\n if len(echoarea_msglist) > 0:\n if current in echoarea_msglist:\n new = int(api.get_echoarea_count(echoarea[0])) - echoarea_msglist.index(current) - 1\n\n if new > 0:\n last = echoarea_msglist[-new];\n else:\n last = echoarea_msglist[-1];\n\n temp.append(new)\n temp.append(last)\n if last and len(last) > 0:\n temp.append(get_page(api.get_echoarea(echoarea[0]).index(last)))\n else:\n temp.append(get_page(len(api.get_echoarea(echoarea[0]))))\n allechoareas.append(temp)\n return allechoareas\n\n@route(\"/\")\ndef index():\n api.load_config()\n echoareas = []\n s = request.get_cookie(\"subscription\", secret='some-secret-key')\n if not s:\n subscription = []\n for ea in api.echoareas:\n subscription.append(ea[0])\n response.set_cookie(\"subscription\", subscription, path=\"/\", max_age=180*24*60*60, secret='some-secret-key')\n s = subscription\n if api.nosubscription:\n subscription = api.echoareas\n else:\n subscription = []\n for ea in s:\n flag = False\n for e in api.echoareas:\n if ea in e:\n flag = True\n subscription.append(e)\n if not flag:\n subscription.append([ea, \"\"])\n ea = [[echoarea[0], echoarea[1], api.get_time(echoarea[0])] for echoarea in subscription]\n for echoarea in sorted(ea, key=lambda ea: ea[2], reverse=True)[0:5]:\n last = request.get_cookie(echoarea[0], secret='some-secret-key')\n if not last in api.get_echo_msgids(echoarea[0]):\n last = False\n if not last or len(last) == 0:\n last = api.get_last_msgid(echoarea[0])\n if last and len(last) > 0:\n page = get_page(api.get_echoarea(echoarea[0]).index(last))\n else:\n page = get_page(len(api.get_echoarea(echoarea[0])))\n echoareas.append({\"echoname\": echoarea[0], \"count\": api.get_echoarea_count(echoarea[0]), \"dsc\": echoarea[1], \"msg\": api.get_last_msg(echoarea[0]), \"last\": last, \"page\": page})\n allechoareas = echoes(subscription)\n auth = request.get_cookie(\"authstr\")\n msgfrom, addr = points.check_point(auth)\n return template(\"tpl/index.tpl\", nodename=api.nodename, dsc=api.nodedsc, echoareas=echoareas, allechoareas=allechoareas, addr=addr, auth=auth, background=api.background, nosubscription=api.nosubscription)\n\n@route(\"/echolist\")\ndef echolist():\n api.load_config()\n echoareas = []\n s = request.get_cookie(\"subscription\", secret='some-secret-key')\n if not s:\n subscription = []\n for ea in api.echoareas:\n subscription.append(ea[0])\n response.set_cookie(\"subscription\", subscription, path=\"/\", max_age=180*24*60*60, secret='some-secret-key')\n s = subscription\n if api.nosubscription:\n subscription = api.echoareas\n else:\n subscription = []\n for ea in s:\n flag = False\n for e in api.echoareas:\n if ea in e:\n flag = True\n subscription.append(e)\n if not flag:\n subscription.append([ea, \"\"])\n allechoareas = echoes(subscription)\n auth = request.get_cookie(\"authstr\")\n msgfrom, addr = points.check_point(auth)\n return 
template(\"tpl/echolist.tpl\", nodename=api.nodename, dsc=api.nodedsc, allechoareas=allechoareas, addr=addr, auth=auth, background=api.background, nosubscription=api.nosubscription)\n\ndef ffeed(echoarea, msgid, page):\n api.load_config()\n msglist = api.get_echoarea(echoarea)\n result = []\n last = request.get_cookie(echoarea, secret='some-secret-key')\n if not last in api.get_echoarea(echoarea):\n last = False\n if not last or len(last) == 0:\n last = api.get_last_msgid(echoarea)\n if not page:\n if not last:\n page = get_page(len(msglist))\n if page == 0:\n page = 1\n else:\n page = get_page(msglist.index(last))\n page = int(page)\n start = page * 50 - 50\n end = start + 50\n for mid in msglist[start:end]:\n msg = api.get_msg(mid).split(\"\\n\")\n if len(msg) > 1:\n result.append([mid, msg])\n ea = [ea for ea in api.echoareas if ea[0] == echoarea]\n if len(ea) != 1:\n ea = [echoarea, \"\"]\n else:\n ea = ea[0]\n auth = request.get_cookie(\"authstr\")\n if len(msglist) <= end:\n end = api.get_last_msgid(echoarea)\n else:\n end = msglist[end]\n response.set_cookie(echoarea, end, path=\"/\", max_age=180*24*60*60, secret='some-secret-key')\n return template(\"tpl/feed.tpl\", nodename=api.nodename, dsc=api.nodedsc, echoarea=ea, page=page, msgs=result, msgid=msgid, background=api.background, auth=auth)\n\n@route(\"/.\")\n@route(\"/./\")\n@route(\"/.//\")\ndef echoreas(e1, e2, msgid=False, page=False):\n echoarea=e1 + \".\" + e2\n if not request.get_cookie(echoarea):\n response.set_cookie(echoarea, api.get_last_msgid(echoarea), max_age=180*24*60*60, secret='some-secret-key')\n last = msgid or request.get_cookie(echoarea, secret='some-secret-key')\n if not last in api.get_echoarea(echoarea):\n last = False\n if not last or len(last) == 0:\n last = api.get_last_msgid(echoarea)\n index = api.get_echoarea(echoarea)\n if len(index) > 0 and index[-1] != last and last in index:\n last = index[index.index(last) + 1]\n if len(index) == 0:\n last = False\n if echoarea != \"favicon.ico\":\n if last:\n redirect(\"/\" + last)\n else:\n redirect(\"/new/\" + echoarea)\n\n@route(\"/\")\ndef showmsg(msgid):\n api.load_config()\n if api.msg_filter(msgid):\n body = api.get_msg(msgid).split(\"\\n\")\n if body != [\"\"]:\n msgfrom, addr = points.check_point(request.get_cookie(\"authstr\"))\n kludges = body[0].split(\"/\")\n if \"repto\" in kludges:\n repto = kludges[kludges.index(\"repto\") + 1]\n else:\n repto = False\n if len(body) > 0:\n echoarea = [ea for ea in api.echoareas if ea[0] == body[1]]\n if len(echoarea) == 0:\n echoarea = [body[1], \"\"]\n else:\n echoarea = echoarea[0]\n else:\n echoarea = [\"\", \"\"]\n t = api.formatted_time(body[2])\n point = body[3]\n address = body[4]\n to = body[5]\n subj = body[6]\n body = body[8:]\n index = api.get_echoarea(echoarea[0])\n current = index.index(msgid)\n response.set_cookie(echoarea[0], msgid, max_age=180*24*60*60, secret='some-secret-key')\n auth = request.get_cookie(\"authstr\")\n return template(\"tpl/message.tpl\", nodename=api.nodename, echoarea=echoarea, index=index, msgid=msgid, repto=repto, current=current, time=t, point=point, address=address, to=to, subj=subj, body=body, msgfrom=msgfrom, background=api.background, auth=auth)\n else:\n redirect(\"/\")\n else:\n redirect(\"/\")\n\n@route(\"/msglist/\")\n@route(\"/msglist//\")\n@route(\"/msglist///\")\ndef msg_list(echoarea, page=False, msgid=False):\n api.load_config()\n msglist = api.get_echoarea(echoarea)\n result = []\n for mid in msglist:\n msg = api.get_msg(mid).split(\"\\n\")\n try:\n subject = 
msg[6]\n f = msg[3]\n t = msg[5]\n result.append({\"msgid\": mid, \"subject\": subject, \"from\": f, \"to\": t})\n except:\n None\n ea = [ea for ea in api.echoareas if ea[0] == echoarea]\n if len(ea) == 0:\n ea = [echoarea, '']\n else:\n ea = ea[0]\n if not page:\n if not msgid:\n page = get_page(len(msglist))\n else:\n page = get_page(msglist.index(msgid))\n if page == 0:\n page = 1\n return template(\"tpl/msglist.tpl\", nodename=api.nodename, dsc=api.nodedsc, page=int(page), echoarea=ea, msgid=msgid, msglist=result, topiclist=False, background=api.background)\n\n@route(\"/new/<e1>.<e2>\")\n@route(\"/reply/<e1>.<e2>\")\n@route(\"/reply/<e1>.<e2>/<msgid>\")\ndef reply(e1, e2, msgid = False):\n echoarea = e1 + \".\" + e2\n auth = request.get_cookie(\"authstr\")\n if msgid:\n msg = api.get_msg(msgid).split(\"\\n\")\n else:\n msg = False\n return template(\"tpl/reply.tpl\", nodename=api.nodename, dsc=api.nodedsc, echoarea=echoarea, msgid=msgid, msg=msg, auth=auth, hidehome=False, topiclist=False, background=api.background)\n\n@post(\"/a/savemsg/<echoarea>\")\n@post(\"/a/savemsg/<echoarea>/<msgid>\")\ndef save_message(echoarea, msgid = False):\n if api.echo_filter(echoarea):\n subj = request.forms.get(\"subj\")\n msgbody = request.forms.get(\"msgbody\")\n if len(subj) > 0 and len(msgbody) > 0:\n pauth = request.forms.get(\"authstr\")\n msgfrom, addr = points.check_point(pauth)\n if not addr:\n return \"auth error!\"\n response.set_cookie(\"authstr\", pauth, path=\"/\", max_age=3600000000)\n msg = \"\"\n msg = msg + echoarea + \"\\n\"\n msg = msg + request.forms.get(\"to\") + \"\\n\"\n msg = msg + subj + \"\\n\\n\"\n if msgid:\n msg = msg + \"@repto:\" + msgid + \"\\n\"\n msg = msg + msgbody\n msg = base64.b64encode(msg.encode(\"utf8\"))\n message=api.toss_msg(msgfrom, addr, msg)\n if message.startswith(\"msg ok\"):\n redirect(\"/%s\" % message[7:])\n else:\n redirect(\"/\")\n\n@post(\"/a/savefile\")\ndef savefile():\n auth = request.get_cookie(\"authstr\")\n username, addr = points.check_point(auth)\n if addr:\n dest = request.forms.get(\"dest\")\n fileecho = request.forms.get(\"fileecho\")\n tfileecho = request.forms.get(\"tfileecho\")\n f = request.files.get(\"file\")\n dsc = request.forms.get(\"dsc\")\n if fileecho == \"\":\n fecho = tfileecho\n else:\n fecho = fileecho\n path = \"files/\" + fecho\n if not api.file_filter(f.raw_filename):\n return template(\"tpl/upload_message.tpl\", nodename=api.nodename, dsc=api.nodedsc, background=api.background, message=\"Некорректное имя файла\")\n if api.fecho_filter(fecho):\n f.save(\"temp\")\n if not os.path.exists(\"files/%s\" % fecho):\n os.makedirs(\"files/%s\" % fecho)\n hsh = api.fhsh(open(\"./temp\", \"rb\").read())\n hshs = []\n try:\n for row in open(\"fecho/%s\" % fecho, \"r\").read().split(\"\\n\"):\n hshs.append(row.split(\":\")[0])\n except:\n None\n blacklist = open(\"fblacklist.txt\", \"r\").read().split(\"\\n\")\n if not hsh in hshs and not hsh in blacklist:\n name = f.raw_filename\n while os.path.exists(\"files/%s/%s\" % (fecho, name)):\n tmp = name.split(\".\")\n name = \".\".join(tmp[:-1])\n suffix = name.split(\"_\")[-1]\n if suffix == name:\n suffix = \"0\"\n try:\n s = int(suffix)\n s += 1\n post = \"_\" + str(s)\n except:\n post = \"_1\"\n if suffix != \"0\":\n name = name.replace(\"_\" + suffix, post) + \".\" + tmp[-1]\n else:\n name = name + post + \".\" + tmp[-1]\n try:\n size = str(os.stat(\"temp\").st_size)\n except:\n size = \"0\"\n copyfile(\"temp\", \"files/%s/%s\" % (fecho, name))\n os.remove(\"temp\")\n codecs.open(\"fecho/%s\" % fecho, \"a\", 
\"utf8\").write(\"%s:%s:%s:%s,%s:%s\\n\" % (hsh, name, size, api.nodename, addr, dsc.replace(\"\\n\", \" \").replace(\"\\r\", \"\")))\n codecs.open(\"files/indexes/files.txt\", \"a\", \"utf8\").write(\"%s/%s:%s\\n\" % (fecho, name, dsc.replace(\"\\n\", \" \").replace(\"\\r\", \"\")))\n return template(\"tpl/upload_message.tpl\", nodename=api.nodename, dsc=api.nodedsc, background=api.background, message=\"Файл успешно загружен\")\n else:\n os.remove(\"./temp\")\n return template(\"tpl/upload_message.tpl\", nodename=api.nodename, dsc=api.nodedsc, background=api.background, message=\"Такой файл уже существует\")\n os.remove(\"./temp\")\n else:\n return template(\"tpl/upload_message.tpl\", nodename=api.nodename, dsc=api.nodedsc, background=api.background, message=\"Некорректное имя файлэхоконференции\")\n else:\n redirect(\"/\")\n\n@post(\"/s/subscription\")\n@route(\"/s/subscription\")\ndef subscription():\n api.load_config()\n s = request.forms.get(\"subscription\")\n subscription = []\n if request.forms.get(\"default\"):\n for ea in api.echoareas:\n subscription.append(ea[0])\n response.set_cookie(\"subscription\", subscription, path=\"/\", max_age=180*24*60*60, secret='some-secret-key')\n redirect(\"/\")\n if s:\n for ea in s.strip().replace(\"\\r\", \"\").split(\"\\n\"):\n if api.echo_filter(ea):\n subscription.append(ea)\n response.set_cookie(\"subscription\", subscription, path=\"/\", max_age=180*24*60*60, secret='some-secret-key')\n redirect(\"/\")\n subscription = request.get_cookie(\"subscription\", secret='some-secret-key')\n echoareas = []\n for echoarea in api.echoareas:\n echoareas.append([echoarea[0], api.get_echoarea_count(echoarea[0]), echoarea[1]])\n return template(\"tpl/subscription.tpl\", nodename=api.nodename, dsc=api.nodedsc, echoareas=echoareas, subscription=subscription, background=api.background)\n\ndef sort_files(files):\n filelist = []\n for f in sorted(files):\n if f[0].endswith(\"/\") and not f in filelist:\n filelist.append(f)\n for f in sorted(files):\n if not f in filelist:\n filelist.append(f)\n return filelist\n\n@route(\"/s/filelist\")\n@route(\"/s/filelist/\")\ndef filelist(d = False):\n auth = request.get_cookie(\"authstr\")\n msgfrom, addr = points.check_point(auth)\n files = api.get_public_file_index(d)\n if not addr:\n return template(\"tpl/filelist.tpl\", nodename=api.nodename, dsc=api.nodedsc, files=sort_files(files), auth=False, background=api.background, d=d)\n files = files + api.get_file_index(d)\n try:\n files = files + api.get_private_file_index(msgfrom, d)\n except:\n None\n return template(\"tpl/filelist.tpl\", nodename=api.nodename, dsc=api.nodedsc, files=sort_files(files), auth=auth, background=api.background, d=d)\n\n@route(\"/s/download/\")\ndef download(filename):\n filename = filename.split(\"/\")\n return static_file(filename[-1], \"files/%s\" % \"/\".join(filename[:-1]))\n\n@route(\"/s/blacklisted/\")\ndef blacklist(msgid):\n if api.msg_filter(msgid):\n auth = request.get_cookie(\"authstr\")\n if points.is_operator(auth):\n api.delete_msg(msgid)\n open(\"blacklist.txt\", \"a\").write(msgid + \"\\n\")\n redirect(\"/\")\n\n@route(\"/login\")\n@post(\"/login\")\ndef login():\n username = request.forms.get(\"username\")\n password = request.forms.get(\"password\")\n auth = points.login(username, password)\n if auth:\n if auth != \"error\":\n response.set_cookie(\"authstr\", auth, path=\"/\", max_age=3600000000)\n redirect(\"/\")\n else:\n return template(\"tpl/login.tpl\", nodename=api.nodename, dsc=api.nodedsc, background=api.background, 
username=username, auth=auth, registration=api.registration, alarm=\"Неверные учётные данные!\")\n return template(\"tpl/login.tpl\", nodename=api.nodename, dsc=api.nodedsc, background=api.background, registration=api.registration, username=False, auth=False, alarm=False)\n\n@route(\"/profile\")\ndef profile():\n auth = request.get_cookie(\"authstr\")\n username, addr = points.check_point(auth)\n return template(\"tpl/profile.tpl\", nodename=api.nodename, dsc=api.nodedsc, background=api.background, username=username, auth=auth, addr=addr)\n\n@route(\"/logout\")\ndef logout():\n response.set_cookie(\"authstr\", \"\", path=\"/\", max_age=-1, expires=0)\n redirect(\"/\")\n\n@route(\"/registration\")\n@post(\"/registration\")\ndef registration():\n if api.registration:\n username = request.forms.get(\"username\")\n password = request.forms.get(\"password\")\n if username and password:\n if points.check_username(username):\n return template(\"tpl/registration.tpl\", nodename=api.nodename, dsc=api.nodedsc, background=api.background, alarm=\"Имя пользователя уже существует.\")\n else:\n hsh, phash = points.make_point(username, password)\n points.save_point(phash, username, hsh)\n response.set_cookie(\"authstr\", phash, path=\"/\", max_age=3600000000)\n redirect(\"/\")\n return template(\"tpl/registration.tpl\", nodename=api.nodename, dsc=api.nodedsc, background=api.background, alarm=False)\n else:\n redirect(\"/\")\n\n@route(\"/s/upload\")\ndef upload_form():\n auth = request.get_cookie(\"authstr\")\n username, addr = points.check_point(auth)\n if addr:\n return template(\"tpl/upload.tpl\", nodename=api.nodename, dsc=api.nodedsc, background=api.background, fechoareas=api.fechoareas)\n else:\n redirect(\"/\")\n\n@route(\"/rss/<echoarea>\")\ndef rss(echoarea):\n response.set_header(\"content-type\", \"application/rss+xml; charset=utf-8\")\n api.load_config()\n msglist = api.get_echoarea(echoarea)\n msgs = []\n for msgid in msglist[-50:]:\n msgs.append([msgid, api.get_msg(msgid).split(\"\\n\")])\n return template(\"tpl/rss.tpl\", nodename=api.nodename, dsc=api.nodedsc, nodeurl=api.nodeurl, msgs=reversed(msgs), echoarea=echoarea)\n\n@route(\"/lib/css/<filename>\")\ndef pcss(filename):\n return static_file(filename, root=\"lib/css/\")\n\n@route(\"/lib/fonts/<filename>\")\ndef pfonts(filename):\n return static_file(filename, root=\"lib/fonts/\")\n\n@route(\"/lib/<filename>\")\ndef plib(filename):\n return static_file(filename, root=\"lib/\")\n", "repo_name": "idec-net/iing", "sub_path": "api/web.py", "file_name": "web.py", "file_ext": "py", "file_size_in_byte": 19134, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "73", "api": [{"api_name": "math.ceil", "line_number": 6, "usage_type": "call"}, {"api_name": "api.get_last_msgid", "line_number": 13, "usage_type": "call"}, {"api_name": "api.get_last_msgid", "line_number": 16, "usage_type": "call"}, {"api_name": "api.get_echoarea", "line_number": 17, "usage_type": "call"}, {"api_name": "api.get_echoarea_count", "line_number": 23, "usage_type": "call"}, {"api_name": "api.get_echoarea", "line_number": 33, "usage_type": "call"}, {"api_name": "api.get_echoarea", "line_number": 35, "usage_type": "call"}, {"api_name": "api.load_config", "line_number": 41, "usage_type": "call"}, {"api_name": "api.echoareas", "line_number": 46, "usage_type": "attribute"}, {"api_name": "api.nosubscription", "line_number": 50, "usage_type": "attribute"}, {"api_name": "api.echoareas", "line_number": 51, "usage_type": "attribute"}, {"api_name": "api.echoareas", 
"line_number": 56, "usage_type": "attribute"}, {"api_name": "api.get_time", "line_number": 62, "usage_type": "call"}, {"api_name": "api.get_echo_msgids", "line_number": 65, "usage_type": "call"}, {"api_name": "api.get_last_msgid", "line_number": 68, "usage_type": "call"}, {"api_name": "api.get_echoarea", "line_number": 70, "usage_type": "call"}, {"api_name": "api.get_echoarea", "line_number": 72, "usage_type": "call"}, {"api_name": "api.get_echoarea_count", "line_number": 73, "usage_type": "call"}, {"api_name": "api.get_last_msg", "line_number": 73, "usage_type": "call"}, {"api_name": "points.check_point", "line_number": 76, "usage_type": "call"}, {"api_name": "api.nodename", "line_number": 77, "usage_type": "attribute"}, {"api_name": "api.nodedsc", "line_number": 77, "usage_type": "attribute"}, {"api_name": "api.background", "line_number": 77, "usage_type": "attribute"}, {"api_name": "api.nosubscription", "line_number": 77, "usage_type": "attribute"}, {"api_name": "api.load_config", "line_number": 81, "usage_type": "call"}, {"api_name": "api.echoareas", "line_number": 86, "usage_type": "attribute"}, {"api_name": "api.nosubscription", "line_number": 90, "usage_type": "attribute"}, {"api_name": "api.echoareas", "line_number": 91, "usage_type": "attribute"}, {"api_name": "api.echoareas", "line_number": 96, "usage_type": "attribute"}, {"api_name": "points.check_point", "line_number": 104, "usage_type": "call"}, {"api_name": "api.nodename", "line_number": 105, "usage_type": "attribute"}, {"api_name": "api.nodedsc", "line_number": 105, "usage_type": "attribute"}, {"api_name": "api.background", "line_number": 105, "usage_type": "attribute"}, {"api_name": "api.nosubscription", "line_number": 105, "usage_type": "attribute"}, {"api_name": "api.load_config", "line_number": 108, "usage_type": "call"}, {"api_name": "api.get_echoarea", "line_number": 109, "usage_type": "call"}, {"api_name": "api.get_echoarea", "line_number": 112, "usage_type": "call"}, {"api_name": "api.get_last_msgid", "line_number": 115, "usage_type": "call"}, {"api_name": "api.get_msg", "line_number": 127, "usage_type": "call"}, {"api_name": "api.echoareas", "line_number": 130, "usage_type": "attribute"}, {"api_name": "api.get_last_msgid", "line_number": 137, "usage_type": "call"}, {"api_name": "api.nodename", "line_number": 141, "usage_type": "attribute"}, {"api_name": "api.nodedsc", "line_number": 141, "usage_type": "attribute"}, {"api_name": "api.background", "line_number": 141, "usage_type": "attribute"}, {"api_name": "api.get_last_msgid", "line_number": 149, "usage_type": "call"}, {"api_name": "api.get_echoarea", "line_number": 151, "usage_type": "call"}, {"api_name": "api.get_last_msgid", "line_number": 154, "usage_type": "call"}, {"api_name": "api.get_echoarea", "line_number": 155, "usage_type": "call"}, {"api_name": "api.load_config", "line_number": 168, "usage_type": "call"}, {"api_name": "api.msg_filter", "line_number": 169, "usage_type": "call"}, {"api_name": "api.get_msg", "line_number": 170, "usage_type": "call"}, {"api_name": "points.check_point", "line_number": 172, "usage_type": "call"}, {"api_name": "api.echoareas", "line_number": 179, "usage_type": "attribute"}, {"api_name": "api.formatted_time", "line_number": 186, "usage_type": "call"}, {"api_name": "api.get_echoarea", "line_number": 192, "usage_type": "call"}, {"api_name": "api.nodename", "line_number": 196, "usage_type": "attribute"}, {"api_name": "api.background", "line_number": 196, "usage_type": "attribute"}, {"api_name": "api.load_config", "line_number": 
206, "usage_type": "call"}, {"api_name": "api.get_echoarea", "line_number": 207, "usage_type": "call"}, {"api_name": "api.get_msg", "line_number": 210, "usage_type": "call"}, {"api_name": "api.echoareas", "line_number": 218, "usage_type": "attribute"}, {"api_name": "api.nodename", "line_number": 230, "usage_type": "attribute"}, {"api_name": "api.nodedsc", "line_number": 230, "usage_type": "attribute"}, {"api_name": "api.background", "line_number": 230, "usage_type": "attribute"}, {"api_name": "api.get_msg", "line_number": 239, "usage_type": "call"}, {"api_name": "api.nodename", "line_number": 242, "usage_type": "attribute"}, {"api_name": "api.nodedsc", "line_number": 242, "usage_type": "attribute"}, {"api_name": "api.background", "line_number": 242, "usage_type": "attribute"}, {"api_name": "api.echo_filter", "line_number": 247, "usage_type": "call"}, {"api_name": "points.check_point", "line_number": 252, "usage_type": "call"}, {"api_name": "base64.b64encode", "line_number": 263, "usage_type": "call"}, {"api_name": "api.toss_msg", "line_number": 264, "usage_type": "call"}, {"api_name": "points.check_point", "line_number": 273, "usage_type": "call"}, {"api_name": "api.file_filter", "line_number": 285, "usage_type": "call"}, {"api_name": "api.nodename", "line_number": 286, "usage_type": "attribute"}, {"api_name": "api.nodedsc", "line_number": 286, "usage_type": "attribute"}, {"api_name": "api.background", "line_number": 286, "usage_type": "attribute"}, {"api_name": "api.fecho_filter", "line_number": 287, "usage_type": "call"}, {"api_name": "api.fhsh", "line_number": 291, "usage_type": "call"}, {"api_name": "shutil.copyfile", "line_number": 321, "usage_type": "call"}, {"api_name": "codecs.open", "line_number": 323, "usage_type": "call"}, {"api_name": "api.nodename", "line_number": 323, "usage_type": "attribute"}, {"api_name": "codecs.open", "line_number": 324, "usage_type": "call"}, {"api_name": "api.nodename", "line_number": 325, "usage_type": "attribute"}, {"api_name": "api.nodedsc", "line_number": 325, "usage_type": "attribute"}, {"api_name": "api.background", "line_number": 325, "usage_type": "attribute"}, {"api_name": "api.nodename", "line_number": 328, "usage_type": "attribute"}, {"api_name": "api.nodedsc", "line_number": 328, "usage_type": "attribute"}, {"api_name": "api.background", "line_number": 328, "usage_type": "attribute"}, {"api_name": "api.nodename", "line_number": 331, "usage_type": "attribute"}, {"api_name": "api.nodedsc", "line_number": 331, "usage_type": "attribute"}, {"api_name": "api.background", "line_number": 331, "usage_type": "attribute"}, {"api_name": "api.load_config", "line_number": 338, "usage_type": "call"}, {"api_name": "api.echoareas", "line_number": 342, "usage_type": "attribute"}, {"api_name": "api.echo_filter", "line_number": 348, "usage_type": "call"}, {"api_name": "api.echoareas", "line_number": 354, "usage_type": "attribute"}, {"api_name": "api.get_echoarea_count", "line_number": 355, "usage_type": "call"}, {"api_name": "api.nodename", "line_number": 356, "usage_type": "attribute"}, {"api_name": "api.nodedsc", "line_number": 356, "usage_type": "attribute"}, {"api_name": "api.background", "line_number": 356, "usage_type": "attribute"}, {"api_name": "points.check_point", "line_number": 372, "usage_type": "call"}, {"api_name": "api.get_public_file_index", "line_number": 373, "usage_type": "call"}, {"api_name": "api.nodename", "line_number": 375, "usage_type": "attribute"}, {"api_name": "api.nodedsc", "line_number": 375, "usage_type": "attribute"}, 
{"api_name": "api.background", "line_number": 375, "usage_type": "attribute"}, {"api_name": "api.get_file_index", "line_number": 376, "usage_type": "call"}, {"api_name": "api.get_private_file_index", "line_number": 378, "usage_type": "call"}, {"api_name": "api.nodename", "line_number": 381, "usage_type": "attribute"}, {"api_name": "api.nodedsc", "line_number": 381, "usage_type": "attribute"}, {"api_name": "api.background", "line_number": 381, "usage_type": "attribute"}, {"api_name": "api.msg_filter", "line_number": 390, "usage_type": "call"}, {"api_name": "points.is_operator", "line_number": 392, "usage_type": "call"}, {"api_name": "api.delete_msg", "line_number": 393, "usage_type": "call"}, {"api_name": "points.login", "line_number": 402, "usage_type": "call"}, {"api_name": "api.nodename", "line_number": 408, "usage_type": "attribute"}, {"api_name": "api.nodedsc", "line_number": 408, "usage_type": "attribute"}, {"api_name": "api.background", "line_number": 408, "usage_type": "attribute"}, {"api_name": "api.registration", "line_number": 408, "usage_type": "attribute"}, {"api_name": "api.nodename", "line_number": 409, "usage_type": "attribute"}, {"api_name": "api.nodedsc", "line_number": 409, "usage_type": "attribute"}, {"api_name": "api.background", "line_number": 409, "usage_type": "attribute"}, {"api_name": "api.registration", "line_number": 409, "usage_type": "attribute"}, {"api_name": "points.check_point", "line_number": 414, "usage_type": "call"}, {"api_name": "api.nodename", "line_number": 415, "usage_type": "attribute"}, {"api_name": "api.nodedsc", "line_number": 415, "usage_type": "attribute"}, {"api_name": "api.background", "line_number": 415, "usage_type": "attribute"}, {"api_name": "api.registration", "line_number": 425, "usage_type": "attribute"}, {"api_name": "points.check_username", "line_number": 429, "usage_type": "call"}, {"api_name": "api.nodename", "line_number": 430, "usage_type": "attribute"}, {"api_name": "api.nodedsc", "line_number": 430, "usage_type": "attribute"}, {"api_name": "api.background", "line_number": 430, "usage_type": "attribute"}, {"api_name": "points.make_point", "line_number": 432, "usage_type": "call"}, {"api_name": "points.save_point", "line_number": 433, "usage_type": "call"}, {"api_name": "api.nodename", "line_number": 436, "usage_type": "attribute"}, {"api_name": "api.nodedsc", "line_number": 436, "usage_type": "attribute"}, {"api_name": "api.background", "line_number": 436, "usage_type": "attribute"}, {"api_name": "points.check_point", "line_number": 443, "usage_type": "call"}, {"api_name": "api.nodename", "line_number": 445, "usage_type": "attribute"}, {"api_name": "api.nodedsc", "line_number": 445, "usage_type": "attribute"}, {"api_name": "api.background", "line_number": 445, "usage_type": "attribute"}, {"api_name": "api.fechoareas", "line_number": 445, "usage_type": "attribute"}, {"api_name": "api.load_config", "line_number": 452, "usage_type": "call"}, {"api_name": "api.get_echoarea", "line_number": 453, "usage_type": "call"}, {"api_name": "api.get_msg", "line_number": 456, "usage_type": "call"}, {"api_name": "api.nodename", "line_number": 457, "usage_type": "attribute"}, {"api_name": "api.nodedsc", "line_number": 457, "usage_type": "attribute"}, {"api_name": "api.nodeurl", "line_number": 457, "usage_type": "attribute"}]}
+{"seq_id": "27651727805", "text": "from constant import *\nimport datetime\nfrom tabulate import *\n\n\nborder = \"+-------------------------+\"\n\ndef print_security_algo_supported(enb_cipher_algo_supported, integrityProtAlgorithm_eNB, epc_cipher_algo_supported, epc_integ_algo_supported, preferred_algorithms):\n print(\"\\n\" + border)\n print(\"| eNB Security Algorithms |\")\n print(border)\n print(\"| {:<28} | {:<}\".format(\"eNB_cipheringAlgorithm_supported\", str(enb_cipher_algo_supported)))\n print(\"| {:<28} | {:<}\".format(\"eNB_integrityAlgorithm_supported\", str(integrityProtAlgorithm_eNB)))\n print(\"| {:<28} | {:<}\".format(\"eNB_preferred_cipheringAlgorithm\", str(preferred_algorithms[0])))\n print(\"| {:<28} | {:<}\".format(\"eNB_preferred_integrityAlgorithm\", str(preferred_algorithms[1])))\n print(\"\\n\" + border)\n print(\"| EPC Security Algorithms |\")\n print(border)\n print(\"| {:<28} | {:<}\".format(\"EPC_cipheringAlgorithm_supported\", str(epc_cipher_algo_supported)))\n print(\"| {:<28} | {:<}\".format(\"EPC_integrityAlgorithm_supported\", str(epc_integ_algo_supported)))\n print(\"| {:<28} | {:<}\".format(\"EPC_preferred_cipheringAlgorithm\", str(preferred_algorithms[2])))\n print(\"| {:<28} | {:<}\".format(\"EPC_preferred_integrityAlgorithm\", str(preferred_algorithms[3])))\n print_table()\n\n\n\ndef print_table():\n print(\"\\n\\nThe table below provides a summary of the algorithm types and their corresponding descriptions.\")\n parameters = [\"Type\", \"Decription\"]\n data = [\n [\"EEA0\", \"Null ciphering algorithm\"],\n [\"EEA1\", \"SNOW 3G\"],\n [\"EEA2\", \"AES\"],\n [\"EEA3\", \"ZUC\"]\n ]\n\n # Crea la tabella utilizzando tabulate\n table = tabulate(data, headers=parameters, tablefmt=\"grid\")\n\n # Stampa la tabella\n print(table)\n\ndef print_cell_identity(mcc, mnc, cellReservedForOperatorUse, trackingAreaCode, cellIdentity, intraFreqReselection, cellBarred):\n print(\"\\n\\n\")\n print(border)\n print(\"| Cell identity |\")\n print(border)\n print(\"| {:<28} | {:<}\".format(\"mcc\", str(mcc)))\n print(\"| {:<28} | {:<}\".format(\"mnc\", str(mnc)))\n #print(\"| {:<28} | {:<}\".format(\"cellReservedForOperatorUse\", cellReservedForOperatorUse))\n print(\"| {:<28} | {:<}\".format(\"trackingAreaCode\", trackingAreaCode))\n print(\"| {:<28} | {:<}\".format(\"cellIdentity\", cellIdentity))\n #print(\"| {:<28} | {:<}\".format(\"intraFreqReselection\", intraFreqReselection))\n #print(\"| {:<28} | {:<}\".format(\"cellBarred\", cellBarred))\n \n\ndef print_header():\n current_time = datetime.datetime.now() \n print(f\"Starting 5GMap (https://github.com/chiacchius/5gmap) at {current_time.strftime('%Y-%m-%d %H:%M %Z')}\")\n print(\"It may take several minutes.\")\n print(\"\"\"\n╭─────────────────────────────────────────────────╮\n│ .------. ,----. ,--. ,--. ,---. ,------. │\n│ | .--.' ' .-./ | `.' | / O \\\\ | .--. ' │\n│ '---. \\\\ | | .---.| |'.'| || .-. 
|| '--' | │\n│ .---' / ' '--' || | | || | | || | --' │\n│ `----' `------' `--' `--'`--' `--'`--' │\n╰─────────────────────────────────────────────────╯\n\"\"\")\n if REAL_TESTING:\n #manage imsi \n print(\"[5GMAP] Binding to Base Station\")\n\n else:\n\n print(\"[5GMAP] Simulation with srsran\")\n\n\ndef parse_file(file_path):\n with open(file_path, 'r') as file:\n current_section = None\n sections = {}\n lines = []\n rrc_uplink_messages = []\n rrc_downlink_messages = []\n nas_uplink_messages = []\n nas_downlink_messages = []\n\n for line in file:\n\n if line.startswith(\"MIB:\") | line.startswith(\"SIB1:\") | line.startswith(\"SIB2:\") | line.startswith(\"SIB3:\"):\n if current_section is not None:\n sections[current_section] = lines\n\n current_section = line[:-2]\n lines = \"\"\n\n elif line.startswith(\"UPLINK_RRC:\"):\n if current_section is not None:\n sections[current_section] = lines\n current_section = None\n rrc_uplink_messages.append(line[11:-1])\n\n elif line.startswith(\"DOWNLINK_RRC:\"):\n if current_section is not None:\n sections[current_section] = lines\n current_section = None\n rrc_downlink_messages.append(line[13:-1])\n \n elif line.startswith(\"UPLINK_NAS:\"):\n if current_section is not None:\n sections[current_section] = lines\n current_section = None\n nas_uplink_messages.append(line[11:-1])\n\n elif line.startswith(\"DOWNLINK_NAS:\"):\n if current_section is not None:\n sections[current_section] = lines\n current_section = None\n nas_downlink_messages.append(line[13:-1])\n\n else:\n lines = lines + line\n\n # Append the last section that was found\n if current_section is not None:\n sections[current_section] = lines\n\n sections[\"UPLINK_RRC\"] = rrc_uplink_messages\n sections[\"DOWNLINK_RRC\"] = rrc_downlink_messages\n sections[\"UPLINK_NAS\"] = nas_uplink_messages\n sections[\"DOWNLINK_NAS\"] = nas_downlink_messages\n return sections\n \n", "repo_name": "chiacchius/5gmap", "sub_path": "utils.py", "file_name": "utils.py", "file_ext": "py", "file_size_in_byte": 5494, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "73", "api": [{"api_name": "datetime.datetime.now", "line_number": 58, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 58, "usage_type": "attribute"}]}
+{"seq_id": "11316294009", "text": "import urllib\nimport base64\n\nfrom app import utils\nfrom app.clients.base import BaseHttpClient\n\nfrom app.config import settings\n\nimport httpx\n \nclass SpotifyClient(BaseHttpClient):\n def __init__(self):\n self.client_id = settings.spotify_client_id\n self.secret = settings.spotify_client_secret\n self.api_base_url = 'https://api.spotify.com/v1'\n self.account_base_url = 'https://accounts.spotify.com'\n self.redirect_uri = 'http://localhost:80/callback'\n \n def make_authorize_url(self):\n url = self.account_base_url + '/authorize'\n verifier, challenge = utils.gen_code_verifier()\n\n params = {\n 'client_id': self.client_id,\n 'response_type': 'code',\n 'redirect_uri': self.redirect_uri,\n 'state': verifier,\n 'scope': 'playlist-modify-public',\n 'code_challenge_method': 'S256',\n 'code_challenge': challenge,\n }\n\n return url + '?' + urllib.parse.urlencode(params)\n \n def get_token(self, code: str, state: str):\n url = self.account_base_url + '/api/token'\n \n headers = {\n 'Authorization': f\"Basic {base64.b64encode(f'{self.client_id}:{self.secret}'.encode()).decode()}\",\n 'Content-Type': 'application/x-www-form-urlencoded'\n }\n \n data = {\n 'client_id': self.client_id,\n 'grant_type': 'authorization_code',\n 'redirect_uri': self.redirect_uri,\n 'code': code,\n 'code_verifier': state,\n }\n \n resp = httpx.post(url, headers=headers, data=data)\n \n return resp.json()\n\n \n \n", "repo_name": "rjnocelli/the-music-discoverer", "sub_path": "backend/app/clients/spotify.py", "file_name": "spotify.py", "file_ext": "py", "file_size_in_byte": 1687, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "73", "api": [{"api_name": "app.clients.base.BaseHttpClient", "line_number": 11, "usage_type": "name"}, {"api_name": "app.config.settings.spotify_client_id", "line_number": 13, "usage_type": "attribute"}, {"api_name": "app.config.settings", "line_number": 13, "usage_type": "name"}, {"api_name": "app.config.settings.spotify_client_secret", "line_number": 14, "usage_type": "attribute"}, {"api_name": "app.config.settings", "line_number": 14, "usage_type": "name"}, {"api_name": "app.utils.gen_code_verifier", "line_number": 21, "usage_type": "call"}, {"api_name": "app.utils", "line_number": 21, "usage_type": "name"}, {"api_name": "urllib.parse.urlencode", "line_number": 33, "usage_type": "call"}, {"api_name": "urllib.parse", "line_number": 33, "usage_type": "attribute"}, {"api_name": "base64.b64encode", "line_number": 39, "usage_type": "call"}, {"api_name": "httpx.post", "line_number": 51, "usage_type": "call"}]}
+{"seq_id": "72268909675", "text": "#! /usr/bin/python3\n\nimport os\nimport pdfrw\nimport PlayerCharacter as pc\nimport json\nimport register\n\ntemplatePath = 'template.pdf'\n\nannotMarker = '/Annots'\nfieldMarker = '/T'\nsubMarker = '/Subtype'\nwidgetMarker = '/Widget'\n\ndef writePDF(inputPath, outputPath, dataDict):\n\t\"\"\"Function to fill in the forms of a PDF with values from Dict.\n\t\n\t:param inputPath: Path of template PDF\n\t:param outputPath: Path to make new PDF at\n\t:param dataDict: Dictionary with keys with same name as form fields.\n\t\"\"\"\n\ttemplatePDF = pdfrw.PdfReader(inputPath)\n\tannotations = templatePDF.pages[0][annotMarker]\n\tfor annotation in annotations:\n\t\tif annotation[subMarker] == widgetMarker:\n\t\t\tif annotation[fieldMarker]:\n\t\t\t\tkey = annotation[fieldMarker][1:-1]\n\t\t\t\tif key in dataDict.keys():\n\t\t\t\t\tannotation.update(pdfrw.PdfDict(V='{}'.format(dataDict[key])))\n\n\tpdfrw.PdfWriter().write(outputPath, templatePDF)\n\n# Make a Character\nplayer = pc.PlayerCharacter()\nregister.registerOptions(player)\n\ndefaultData = {\n\t'CharacterName': player.name,\n\t'STR': player.pstr,\n\t'STRmod': player.strmod,\n\t'DEX': player.dex,\n\t'DEXmod ': player.dexmod,\n\t'CON': player.con,\n\t'CONmod': player.conmod,\n\t'INT': player.pint,\n\t'INTmod': player.intmod,\n\t'WIS': player.wis,\n\t'WISmod': player.wismod,\n\t'CHA': player.cha,\n\t'CHamod': player.chamod,\n\t'ClassLevel': player.classes,\n\t'Background': player.background,\n\t'PlayerName': player.pname,\n\t'Race ': player.race,\n\t'Alignment': player.alignment,\n\t'XP': player.xp,\n\t'AC': player.ac,\n\t'Initiative': player.init,\n\t'Speed': player.speed,\n\t'HPMax': player.hpmax,\n\t'HPCurrent': player.hpcurrent,\n\t'HPTemp': player.hptemp,\n\t'HDTotal': player.hptotal,\n\t'HD': player.hd,\n\t'PersonalityTraits ': player.ptraits,\n\t'Ideals': player.ideals,\n\t'Bonds': player.bonds,\n\t'Flaws': player.flaws,\n\t'ProficienciesLang': player.profs,\n\t'Features and Traits': player.features,\n\t'Equipment': \"\\n\".join(player.equipment),\n\t'CP': player.cp,\n\t'SP': player.sp,\n\t'EP': player.ep,\n\t'GP': player.gp,\n\t'PP': player.pp,\n\t'AttacksSpellcasting': player.attacks,\n\t'Wpn Name': player.wpn1,\n\t'Wpn Name 2': player.wpn2,\n\t'Wpn Name 3': player.wpn3,\n\t'Wpn1 AtkBonus': player.wpn1atk,\n\t'Wpn2 AtkBonus ': player.wpn2atk,\n\t'Wpn3 AtkBonus ': player.wpn3atk,\n\t'Wpn1 Damage': player.wpn1dmg,\n\t'Wpn2 Damage ': player.wpn2dmg,\n\t'Wpn3 Damage ': player.wpn3dmg,\n\t'Inspiration': player.inspir,\n\t'ProfBonus': player.prof,\n\t'ST Strength': player.ststr,\n\t'ST Dexterity': player.stdex,\n\t'ST Constitution': player.stcon,\n\t'ST Intelligence': player.stint,\n\t'ST Wisdom': player.stwis,\n\t'ST Charisma': player.stcha,\n\t'Acrobatics': player.skillProfs['acrobatics'][1],\n\t'Animal': player.skillProfs['animal'][1],\n\t'Arcana': player.skillProfs['aracna'][1],\n\t'Athletics': player.skillProfs['athletics'][1],\n\t'Deception ': player.skillProfs['deception'][1],\n\t'History ': player.skillProfs['history'][1],\n\t'Insight': player.skillProfs['insight'][1],\n\t'Intimidation': player.skillProfs['intimidation'][1],\n\t'Investigation ': player.skillProfs['investigation'][1],\n\t'Medicine': player.skillProfs['medicine'][1],\n\t'Nature': player.skillProfs['nature'][1],\n\t'Perception ': player.skillProfs['perception'][1],\n\t'Performance': player.skillProfs['performance'][1],\n\t'Persuasion': player.skillProfs['persuasion'][1],\n\t'Religion': 
player.skillProfs['religion'][1],\n\t'SleightofHand': player.skillProfs['sleight'][1],\n\t'Stealth ': player.skillProfs['stealth'][1],\n\t'Survival': player.skillProfs['survival'][1],\n\t'Passive': player.passivep #Passive Perception\n}\n\nwritePDF(templatePath, \"test.pdf\", defaultData)", "repo_name": "Ethck/dndCharGen", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 3530, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "73", "api": [{"api_name": "pdfrw.PdfReader", "line_number": 23, "usage_type": "call"}, {"api_name": "pdfrw.PdfDict", "line_number": 30, "usage_type": "call"}, {"api_name": "pdfrw.PdfWriter", "line_number": 32, "usage_type": "call"}, {"api_name": "PlayerCharacter.PlayerCharacter", "line_number": 35, "usage_type": "call"}, {"api_name": "register.registerOptions", "line_number": 36, "usage_type": "call"}]}
+{"seq_id": "35424856637", "text": "#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\n#!/usr/bin/env python\n# coding: utf-8\n\n'''\nrun ./train.py --sdept ../../trained_sde_model/4fold_3_2_layer_model.pt\n'''\n\n# 0 define backend\nimport sys, os, time\nimport argparse\nimport glob\n\n# %env DDE_BACKEND=tensorflow.compat.v1\n# %env XLA_FLAGS=--xla_gpu_cuda_data_dir=/usr/local/home/cyan3/miniforge/envs/tf\n\nos.environ['DDE_BACKEND'] = \"pytorch\" # v2\nos.environ['XLA_FLAGS'] = \"--xla_gpu_cuda_data_dir=/usr/local/home/cyan3/miniforge/envs/tf\"\n\n# https://stackoverflow.com/questions/68614547/tensorflow-libdevice-not-found-why-is-it-not-found-in-the-searched-path\n# this directory has /nvvm/libdevice/libdevice.10.bc\n\nprint(os.environ['DDE_BACKEND'])\n\nimport torch\ntorch.set_printoptions(precision=3)\ntorch.set_printoptions(sci_mode=False)\nprint(torch.cuda.is_available())\nprint(torch.cuda.device_count())\nprint(torch.cuda.get_device_name(0))\nprint(torch.version.cuda)\nprint(torch.cuda.current_device())\ntorch.cuda.set_device(0)\n\n# https://pytorch.org/tutorials/recipes/recipes/tuning_guide.html\ntry:\n torch.jit.enable_onednn_fusion(True)\nexcept:\n print(\"no onednn\")\n\ncuda0 = torch.device('cuda:0')\ncpu = torch.device('cpu')\ndevice = cuda0\n\nimport deepxde as dde\nimport numpy as np\nfrom numpy import linalg as LA\nimport math\nimport matplotlib.pyplot as plt\nimport pylab\nfrom os.path import dirname, join as pjoin\nfrom scipy import stats\nimport scipy.io\nfrom scipy.stats import truncnorm, norm\nfrom scipy.optimize import linprog\nfrom scipy import sparse\nfrom scipy.stats import multivariate_normal\nfrom scipy.spatial.distance import cdist\nif dde.backend.backend_name == \"pytorch\":\n exp = dde.backend.torch.exp\nelse:\n from deepxde.backend import tf\n exp = tf.exp\nimport cvxpy as cp\nimport numpy as np\nfrom scipy.linalg import solve_discrete_are\nfrom scipy.linalg import sqrtm as sqrtm2\n\n######################################\n\nimport torch\nfrom torch.autograd import Function\nimport numpy as np\nimport scipy.linalg\n\nsys.path.insert(0,'..')\nfrom layers import *\n\nsde_path = './sde/T_t200_2D/'\nsys.path.insert(0,sde_path)\nfrom trained_sde_model import *\n\nfrom common import *\n\n\n# In[2]:\n\n\ndef tcst1(x, y, network_f, network_g, args):\n psi, rho, u1, u2 = y[:, 0:1], y[:, 1:2], y[:, 2:3], y[:, 3:4]\n\n # x = c10, c12, t\n\n # psi eq (4a), rho eq (4b), u1 eq (6), u2 eq (6)\n dpsi_c10 = dde.grad.jacobian(psi, x, j=0)\n dpsi_c12 = dde.grad.jacobian(psi, x, j=1)\n dpsi_t = dde.grad.jacobian(psi, x, j=2)\n\n hpsi_c10 = dde.grad.hessian(psi, x, i=0, j=0)\n hpsi_c12 = dde.grad.hessian(psi, x, i=1, j=1)\n\n drho_t = dde.grad.jacobian(rho, x, j=2)\n\n drho_c10 = dde.grad.hessian(rho, x, i=0, j=0)\n drho_c12 = dde.grad.hessian(rho, x, i=1, j=1)\n\n # d1\n leaf_x = x[:, 0:2].detach()\n leaf_u1_u2 = y[:, 2:4].detach()\n leaf_t = x[:, 2].detach().unsqueeze(1)\n\n ###########################################\n\n leaf_vec = torch.cat(\n (\n x[:, 0:2], # leaf_x,\n # i think this makes sense since we\n # take jacobian of it w.r.t x for divergence\n y[:, 2:4],\n x[:, 2].unsqueeze(1),\n ),\n dim=1)\n leaf_vec = leaf_vec.requires_grad_(True)\n\n d1 = network_f.forward(leaf_vec)\n d2 = network_g.forward(leaf_vec)**2 / 2 # elementwise\n # divergence terms\n d_rhod1_c10 = dde.grad.jacobian(rho*d1[:, 0], x, j=0)\n d_rhod1_c12 = dde.grad.jacobian(rho*d1[:, 1], x, j=1)\n\n ###########################################\n\n # divergence = trace of jacobian\n # divergence is a 
scalar\n\n u_term = torch.mul(dpsi_c10.squeeze(), d1[:, 0])\\\n + torch.mul(dpsi_c12.squeeze(), d1[:, 1])\\\n + torch.mul(d2[:, 0], hpsi_c10.squeeze())\\\n + torch.mul(d2[:, 1], hpsi_c12.squeeze()).unsqueeze(dim=0)\n\n # import ipdb; ipdb.set_trace()\n\n d_uterm_du1_du2 = torch.autograd.grad(\n outputs=u_term,\n inputs=leaf_vec,\n grad_outputs=torch.ones_like(u_term),\n retain_graph=True)[0]\n\n l_u1 = u1 - d_uterm_du1_du2[:, 2]\n l_u2 = u2 - d_uterm_du1_du2[:, 3]\n if args.bound_u > 0:\n # print(\"bounding u\")\n l_u1_bound = -torch.sum(u1[u1 < -0.005]) +\\\n torch.sum(u1[u1 > 0.005]) \n l_u2_bound = -torch.sum(u2[u2 < -0.005]) +\\\n torch.sum(u2[u2 > 0.005])\n\n l_u1 += args.bound_u * l_u1_bound\n l_u2 += args.bound_u * l_u2_bound\n\n return [\n -dpsi_t + 0.5 * (u1**2 + u2**2)\\\n - (dpsi_c10 * d1[:, 0] + dpsi_c12 * d1[:, 1])\\\n - (d2[:, 0] * hpsi_c10 + d2[:, 1] * hpsi_c12),\n\n -drho_t - (d_rhod1_c10 + d_rhod1_c12)\\\n + (d2[:, 0] * drho_c10 + d2[:, 1] * drho_c12),\n\n l_u1,\n l_u2\n ]\n\ndef get_model(\n d,\n N,\n batchsize,\n model_type,\n activations, # sigmoid, tanh\n mu_0,\n sigma_0,\n mu_T,\n sigma_T,\n T_t,\n args,\n network_f,\n network_g,\n optimizer=\"adam\",\n init=\"Glorot normal\",\n train_distribution=\"Hammersley\",\n timemode=0,\n ni=0,\n epsilon=1e-3\n ):\n M = N**d\n\n linspaces = []\n for i in range(d):\n linspaces.append(np.transpose(\n np.linspace(args.state_bound_min, args.state_bound_max, N))\n )\n\n linspace_tensors = []\n for i in range(d):\n t = torch.from_numpy(\n linspaces[i]).requires_grad_(False)\n t = t.to(device)\n linspace_tensors.append(t)\n\n meshes = np.meshgrid(*linspaces)\n mesh_vectors = []\n for i in range(d):\n mesh_vectors.append(meshes[i].reshape(M,1))\n state = np.hstack(tuple(mesh_vectors))\n\n ######################################\n\n rv0 = multivariate_normal(mu_0, sigma_0 * np.eye(d))\n rvT = multivariate_normal(mu_T, sigma_T * np.eye(d))\n\n rho0=rv0.pdf(state)\n rho0 = np.float32(rho0)\n\n rhoT= rvT.pdf(state)\n rhoT = np.float32(rhoT)\n\n ######################################\n\n time_0=np.hstack((\n state,\n T_0*np.ones((len(mesh_vectors[0]), 1))\n ))\n \n if batchsize is not None:\n rho_0_BC = dde.icbc.PointSetBC(\n time_0,\n rho0[..., np.newaxis],\n component=1,\n batch_size=batchsize,\n shuffle=True\n )\n else:\n rho_0_BC = dde.icbc.PointSetBC(\n time_0,\n rho0[..., np.newaxis],\n component=1,\n )\n\n ######################################\n\n time_t=np.hstack((\n state,\n T_t*np.ones((len(mesh_vectors[0]), 1))\n ))\n \n if batchsize is not None:\n rho_T_BC = dde.icbc.PointSetBC(\n time_t,\n rhoT[..., np.newaxis],\n component=1,\n batch_size=batchsize,\n shuffle=True\n )\n else:\n rho_T_BC = dde.icbc.PointSetBC(\n time_t,\n rhoT[..., np.newaxis],\n component=1,\n )\n\n ######################################\n\n geom=dde.geometry.geometry_3d.Cuboid(\n [args.state_bound_min]*d,\n [args.state_bound_max]*d)\n timedomain = dde.geometry.TimeDomain(0., T_t)\n\n geomtime = dde.geometry.GeometryXTime(geom, timedomain)\n\n bif = samples_between_initial_and_final\n if args.bif > 0:\n bif = args.bif\n\n batchsize2 = None\n if len(args.batchsize2) > 0:\n batchsize2 = int(args.batchsize2)\n\n # dde.data.TimePDE\n data = WASSPDE(\n geomtime,\n lambda x, y: tcst1(\n x, y, network_f, network_g, args),\n [rho_0_BC,rho_T_BC],\n num_domain=bif,\n num_initial=ni, # initial_samples,\n train_distribution=train_distribution,\n domain_batch_size=batchsize2\n )\n\n # d+1 inputs: + t\n # 5 outputs: 2 eq\n net = dde.nn.FNN(\n [d+1] + [70] *4 + [4],\n # 
\"sigmoid\",\n activations,\n init\n # \"zeros\",\n )\n model = model_types[model_type](data, net)\n\n ######################################\n\n losses=[\n \"MSE\",\"MSE\", \"MSE\", \"MSE\",\n \"MSE\",\n \"MSE\",\n ]\n # loss functions are based on PDE + BC: eq outputs, BCs\n\n model.compile(\"adam\", lr=1e-3,loss=losses)\n\n # import ipdb; ipdb.set_trace()\n\n return model, meshes\n\nif __name__ == '__main__':\n\n # In[3]:\n\n\n sde = SDE()\n # state path to model information file\n # load model parameters\n\n files = glob.glob(\n sde_path + \"/*.pt\", \n recursive = False)\n assert(len(files) == 1)\n print(\"using model: \", files[0])\n sde.load_state_dict(torch.load(files[0]))\n\n if torch.cuda.is_available():\n print(\"Using GPU.\")\n sde = sde.to(cuda0)\n # set model to evaluation mode\n sde.eval()\n\n\n # In[4]:\n\n\n d = 2\n N = 15\n batchsize = None\n\n mu_0 = [0.35, 0.35]\n\n sigma = 0.1\n T_t = 200.0\n bcc = np.array([0.41235, 0.37605])\n\n class Container(object):\n state_bound_min = 0.1\n state_bound_max = 0.6\n bound_u = 0\n \n bif = 100000\n batchsize2 = \"5000\"\n batch2_period = 5000\n args = Container()\n\n num_epochs = 15000\n de = 1000\n\n\n # In[5]:\n\n\n model, meshes = get_model(\n d,\n N,\n batchsize,\n 0,\n \"tanh\",\n\n mu_0,\n sigma,\n\n bcc,\n sigma,\n\n T_t,\n args,\n sde.network_f,\n sde.network_g,\n )\n\n print(model)\n\n\n # In[6]:\n\n\n resampler_cb = PDEPointResampler2(\n pde_points=True,\n bc_points=False,\n period=args.batch2_period)\n ck_path = \"./tt200_2d_mse\"\n\n start = time.time()\n losshistory, train_state = model.train(\n iterations=num_epochs,\n display_every=de,\n callbacks=[resampler_cb],\n model_save_path=ck_path)\n end = time.time()\n\n dde.saveplot(losshistory, train_state, issave=True, isplot=True)\n model_path = model.save(ck_path)\n print(model_path)\n\n # \n", "repo_name": "cyan-at/gradschool", "sub_path": "research/tcst/training/iman/train.py", "file_name": "train.py", "file_ext": "py", "file_size_in_byte": 9785, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "73", "api": [{"api_name": "os.environ", "line_number": 22, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 23, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 28, "usage_type": "attribute"}, {"api_name": "torch.set_printoptions", "line_number": 31, "usage_type": "call"}, {"api_name": "torch.set_printoptions", "line_number": 32, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 33, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 33, "usage_type": "attribute"}, {"api_name": "torch.cuda.device_count", "line_number": 34, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 34, "usage_type": "attribute"}, {"api_name": "torch.cuda.get_device_name", "line_number": 35, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 35, "usage_type": "attribute"}, {"api_name": "torch.version", "line_number": 36, "usage_type": "attribute"}, {"api_name": "torch.cuda.current_device", "line_number": 37, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 37, "usage_type": "attribute"}, {"api_name": "torch.cuda.set_device", "line_number": 38, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 38, "usage_type": "attribute"}, {"api_name": "torch.jit.enable_onednn_fusion", "line_number": 42, "usage_type": "call"}, {"api_name": "torch.jit", "line_number": 42, "usage_type": "attribute"}, {"api_name": 
"torch.device", "line_number": 46, "usage_type": "call"}, {"api_name": "torch.device", "line_number": 47, "usage_type": "call"}, {"api_name": "deepxde.backend", "line_number": 64, "usage_type": "attribute"}, {"api_name": "deepxde.backend", "line_number": 65, "usage_type": "attribute"}, {"api_name": "deepxde.backend.tf.exp", "line_number": 68, "usage_type": "attribute"}, {"api_name": "deepxde.backend.tf", "line_number": 68, "usage_type": "name"}, {"api_name": "sys.path.insert", "line_number": 81, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 81, "usage_type": "attribute"}, {"api_name": "sys.path.insert", "line_number": 85, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 85, "usage_type": "attribute"}, {"api_name": "deepxde.grad.jacobian", "line_number": 100, "usage_type": "call"}, {"api_name": "deepxde.grad", "line_number": 100, "usage_type": "attribute"}, {"api_name": "deepxde.grad.jacobian", "line_number": 101, "usage_type": "call"}, {"api_name": "deepxde.grad", "line_number": 101, "usage_type": "attribute"}, {"api_name": "deepxde.grad.jacobian", "line_number": 102, "usage_type": "call"}, {"api_name": "deepxde.grad", "line_number": 102, "usage_type": "attribute"}, {"api_name": "deepxde.grad.hessian", "line_number": 104, "usage_type": "call"}, {"api_name": "deepxde.grad", "line_number": 104, "usage_type": "attribute"}, {"api_name": "deepxde.grad.hessian", "line_number": 105, "usage_type": "call"}, {"api_name": "deepxde.grad", "line_number": 105, "usage_type": "attribute"}, {"api_name": "deepxde.grad.jacobian", "line_number": 107, "usage_type": "call"}, {"api_name": "deepxde.grad", "line_number": 107, "usage_type": "attribute"}, {"api_name": "deepxde.grad.hessian", "line_number": 109, "usage_type": "call"}, {"api_name": "deepxde.grad", "line_number": 109, "usage_type": "attribute"}, {"api_name": "deepxde.grad.hessian", "line_number": 110, "usage_type": "call"}, {"api_name": "deepxde.grad", "line_number": 110, "usage_type": "attribute"}, {"api_name": "torch.cat", "line_number": 119, "usage_type": "call"}, {"api_name": "deepxde.grad.jacobian", "line_number": 133, "usage_type": "call"}, {"api_name": "deepxde.grad", "line_number": 133, "usage_type": "attribute"}, {"api_name": "deepxde.grad.jacobian", "line_number": 134, "usage_type": "call"}, {"api_name": "deepxde.grad", "line_number": 134, "usage_type": "attribute"}, {"api_name": "torch.mul", "line_number": 141, "usage_type": "call"}, {"api_name": "torch.mul", "line_number": 142, "usage_type": "call"}, {"api_name": "torch.mul", "line_number": 143, "usage_type": "call"}, {"api_name": "torch.mul", "line_number": 144, "usage_type": "call"}, {"api_name": "torch.autograd.grad", "line_number": 148, "usage_type": "call"}, {"api_name": "torch.autograd", "line_number": 148, "usage_type": "attribute"}, {"api_name": "torch.ones_like", "line_number": 151, "usage_type": "call"}, {"api_name": "torch.sum", "line_number": 158, "usage_type": "call"}, {"api_name": "torch.sum", "line_number": 159, "usage_type": "call"}, {"api_name": "torch.sum", "line_number": 160, "usage_type": "call"}, {"api_name": "torch.sum", "line_number": 161, "usage_type": "call"}, {"api_name": "numpy.transpose", "line_number": 203, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 204, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 209, "usage_type": "call"}, {"api_name": "numpy.meshgrid", "line_number": 214, "usage_type": "call"}, {"api_name": "numpy.hstack", "line_number": 218, "usage_type": "call"}, 
{"api_name": "scipy.stats.multivariate_normal", "line_number": 222, "usage_type": "call"}, {"api_name": "numpy.eye", "line_number": 222, "usage_type": "call"}, {"api_name": "scipy.stats.multivariate_normal", "line_number": 223, "usage_type": "call"}, {"api_name": "numpy.eye", "line_number": 223, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 226, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 229, "usage_type": "call"}, {"api_name": "numpy.hstack", "line_number": 233, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 235, "usage_type": "call"}, {"api_name": "deepxde.icbc.PointSetBC", "line_number": 239, "usage_type": "call"}, {"api_name": "deepxde.icbc", "line_number": 239, "usage_type": "attribute"}, {"api_name": "numpy.newaxis", "line_number": 241, "usage_type": "attribute"}, {"api_name": "deepxde.icbc.PointSetBC", "line_number": 247, "usage_type": "call"}, {"api_name": "deepxde.icbc", "line_number": 247, "usage_type": "attribute"}, {"api_name": "numpy.newaxis", "line_number": 249, "usage_type": "attribute"}, {"api_name": "numpy.hstack", "line_number": 255, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 257, "usage_type": "call"}, {"api_name": "deepxde.icbc.PointSetBC", "line_number": 261, "usage_type": "call"}, {"api_name": "deepxde.icbc", "line_number": 261, "usage_type": "attribute"}, {"api_name": "numpy.newaxis", "line_number": 263, "usage_type": "attribute"}, {"api_name": "deepxde.icbc.PointSetBC", "line_number": 269, "usage_type": "call"}, {"api_name": "deepxde.icbc", "line_number": 269, "usage_type": "attribute"}, {"api_name": "numpy.newaxis", "line_number": 271, "usage_type": "attribute"}, {"api_name": "deepxde.geometry.geometry_3d.Cuboid", "line_number": 277, "usage_type": "call"}, {"api_name": "deepxde.geometry", "line_number": 277, "usage_type": "attribute"}, {"api_name": "deepxde.geometry.TimeDomain", "line_number": 280, "usage_type": "call"}, {"api_name": "deepxde.geometry", "line_number": 280, "usage_type": "attribute"}, {"api_name": "deepxde.geometry.GeometryXTime", "line_number": 282, "usage_type": "call"}, {"api_name": "deepxde.geometry", "line_number": 282, "usage_type": "attribute"}, {"api_name": "deepxde.nn.FNN", "line_number": 306, "usage_type": "call"}, {"api_name": "deepxde.nn", "line_number": 306, "usage_type": "attribute"}, {"api_name": "glob.glob", "line_number": 339, "usage_type": "call"}, {"api_name": "torch.load", "line_number": 344, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 346, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 346, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 364, "usage_type": "call"}, {"api_name": "time.time", "line_number": 414, "usage_type": "call"}, {"api_name": "time.time", "line_number": 420, "usage_type": "call"}, {"api_name": "deepxde.saveplot", "line_number": 422, "usage_type": "call"}]}
+{"seq_id": "26547108976", "text": "\"\"\"\nUtility functions to get language specific formats.\n\nThese functions are taken from the original django implementation and updated\nto fit our needs.\n\nThe original code can be found here:\nhttps://github.com/django/django/blob/master/django/utils/formats.py\n\n\"\"\"\nfrom django.conf import settings\n# when working with django versions prior to 1.5, we need to use smart_str\n# instead of force_str\ntry:\n from django.utils.encoding import force_str as str_encode\nexcept ImportError:\n from django.utils.encoding import smart_str as str_encode\n\ntry:\n from importlib import import_module\nexcept ImportError:\n from django.utils.importlib import import_module\nfrom django.utils.translation import (\n check_for_language,\n get_language,\n to_locale\n)\n\nCUSTOM_FORMAT_MODULE_PATHS = getattr(settings, 'CUSTOM_FORMAT_MODULE_PATHS',\n ['localized_names.formats'])\n\n# format_cache is a mapping from (format_type, lang) to the format string.\n# By using the cache, it is possible to avoid running get_format_modules\n# repeatedly.\n_format_cache = {}\n_format_modules_cache = {}\n\nISO_INPUT_FORMATS = {\n 'DATE_INPUT_FORMATS': ('%Y-%m-%d',),\n 'TIME_INPUT_FORMATS': ('%H:%M:%S', '%H:%M:%S.%f', '%H:%M'),\n 'DATETIME_INPUT_FORMATS': (\n '%Y-%m-%d %H:%M:%S',\n '%Y-%m-%d %H:%M:%S.%f',\n '%Y-%m-%d %H:%M',\n '%Y-%m-%d'\n ),\n}\n\n\ndef iter_format_modules(lang):\n \"\"\"\n Does the heavy lifting of finding format modules.\n\n \"\"\"\n if check_for_language(lang):\n format_locations = []\n for path in CUSTOM_FORMAT_MODULE_PATHS:\n format_locations.append(path + '.%s')\n format_locations.append('django.conf.locale.%s')\n locale = to_locale(lang)\n locales = [locale]\n if '_' in locale:\n locales.append(locale.split('_')[0])\n for location in format_locations:\n for loc in locales:\n try:\n yield import_module('.formats', location % loc)\n except ImportError:\n pass\n\n\ndef get_format_modules(lang=None, reverse=False):\n \"\"\"\n Returns a list of the format modules found\n\n \"\"\"\n if lang is None:\n lang = get_language()\n modules = _format_modules_cache.setdefault(lang, list(\n iter_format_modules(lang)))\n if reverse:\n return list(reversed(modules))\n return modules\n\n\ndef get_format(format_type, lang=None, use_l10n=None):\n \"\"\"\n For a specific format type, returns the format for the current\n language (locale), defaults to the format in the settings.\n format_type is the name of the format, e.g. 
'DATE_FORMAT'\n\n If use_l10n is provided and is not None, that will force the value to\n be localized (or not), overriding the value of settings.USE_L10N.\n\n \"\"\"\n format_type = str_encode(format_type)\n if use_l10n or (use_l10n is None and settings.USE_L10N):\n if lang is None:\n lang = get_language()\n cache_key = (format_type, lang)\n try:\n cached = _format_cache[cache_key]\n if cached is not None:\n return cached\n else:\n # Return the general setting by default\n return getattr(settings, format_type)\n except KeyError:\n for module in get_format_modules(lang):\n try:\n val = getattr(module, format_type)\n for iso_input in ISO_INPUT_FORMATS.get(format_type, ()):\n if iso_input not in val:\n if isinstance(val, tuple):\n val = list(val)\n val.append(iso_input)\n _format_cache[cache_key] = val\n return val\n except AttributeError:\n pass\n _format_cache[cache_key] = None\n return getattr(settings, format_type)\n", "repo_name": "bitlabstudio/django-libs", "sub_path": "django_libs/format_utils.py", "file_name": "format_utils.py", "file_ext": "py", "file_size_in_byte": 3935, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 96, "dataset": "github-code", "pt": "73", "api": [{"api_name": "django.conf.settings", "line_number": 29, "usage_type": "argument"}, {"api_name": "django.utils.translation.check_for_language", "line_number": 55, "usage_type": "call"}, {"api_name": "django.utils.translation.to_locale", "line_number": 60, "usage_type": "call"}, {"api_name": "django.utils.importlib.import_module", "line_number": 67, "usage_type": "call"}, {"api_name": "django.utils.translation.get_language", "line_number": 78, "usage_type": "call"}, {"api_name": "django.utils.encoding.smart_str", "line_number": 96, "usage_type": "call"}, {"api_name": "django.conf.settings.USE_L10N", "line_number": 97, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 97, "usage_type": "name"}, {"api_name": "django.utils.translation.get_language", "line_number": 99, "usage_type": "call"}, {"api_name": "django.conf.settings", "line_number": 107, "usage_type": "argument"}, {"api_name": "django.conf.settings", "line_number": 122, "usage_type": "argument"}]}
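A short usage sketch for the helper above, assuming a configured Django project with USE_L10N enabled; the import path follows this repo's layout, and the 'de' locale is only an example:

from django_libs.format_utils import get_format

# modules listed in CUSTOM_FORMAT_MODULE_PATHS are consulted before
# django.conf.locale, so a project-level formats module wins:
date_format = get_format('DATE_FORMAT', lang='de')

# for *_INPUT_FORMATS keys the ISO formats are appended when missing, so
# parsing '2024-01-31 12:00' keeps working under any locale override:
input_formats = get_format('DATETIME_INPUT_FORMATS', lang='de')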
+{"seq_id": "22999103732", "text": "import datetime\n\nmahasiswa1 = {\n 'nama': 'Asep Sikasep',\n 'nim': '30012023',\n 'sks_lulus': 130,\n 'beasiswa': False,\n 'lahir': datetime.datetime(1999, 5, 10)\n}\n\nmahasiswa2 = {\n 'nama': 'Dimas Majid',\n 'nim': '30012024',\n 'sks_lulus': 140,\n 'beasiswa': True,\n 'lahir': datetime.datetime(2002, 7, 10)\n}\n\nmahasiswa3 = {\n 'nama': 'Udok Nyolodok',\n 'nim': '30012025',\n 'sks_lulus': 110,\n 'beasiswa': True,\n 'lahir': datetime.datetime(2000, 10, 10)\n}\n\ndata_mhs = {\n 'MAH001': mahasiswa1,\n 'MAH002': mahasiswa2,\n 'MAH003': mahasiswa3,\n}\n\nprint(f\"{'KEY':<6} {'Nama':<15} {'NIM':<9} {'SKS':<7} {'Beasiswa':<10} {'Lahir'} \")\nprint('='*60)\n\nfor mahasiswa in data_mhs:\n KEY = mahasiswa\n NAMA = data_mhs[KEY]['nama']\n NIM = data_mhs[KEY]['nim']\n SKS = data_mhs[KEY]['sks_lulus']\n BEASISWA = data_mhs[KEY]['beasiswa']\n LAHIR = data_mhs[KEY]['lahir'].strftime(\"%x\")\n\n print(f\"{KEY:<6} {NAMA:<15} {NIM:<9} {SKS:<7} {BEASISWA:^10} {LAHIR} \")\n", "repo_name": "FaisalMuaris/Belajar-Python", "sub_path": "37.multi-keys-nesting-dictionary.py", "file_name": "37.multi-keys-nesting-dictionary.py", "file_ext": "py", "file_size_in_byte": 999, "program_lang": "python", "lang": "id", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "73", "api": [{"api_name": "datetime.datetime", "line_number": 8, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 16, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 24, "usage_type": "call"}]}
+{"seq_id": "38594273972", "text": "import pytest\nimport urllib3\nfrom influx import Influx\n\n@pytest.fixture\ndef test_database():\n ''' Returns a InfluxDB object'''\n token = \"ci_zJ9DSnTbO4fSjRVxKjn2956LhXDre0y8DkMNgMmpp1ptQDsNe_u5RMwxGr0XAN2pjyHOuJ5yAd1KfnQGQUg==\"\n org = \"cheekyagentpotter@gmail.com\"\n bucket = \"test\"\n url=\"https://eu-central-1-1.aws.cloud2.influxdata.com\"\n \n return Influx(token=token, org=org, bucket=bucket, url=url)\n\n@pytest.fixture\ndef test_faulty_database():\n ''' Returns a faulty InfluxDB object'''\n token = \"these\"\n org = \"credentials\"\n bucket = \"won't\"\n url=\"work\"\n \n return Influx(token=token, org=org, bucket=bucket, url=url)\n\ndef test_write_and_query(test_database):\n data = \"mem,host=host1 used_percent=23.43234543\"\n test_database.write(data)\n\n query = '''\n from(bucket: \"test\")\n |> range(start: -30d)\\\n |> filter(fn: (r) => r[\"_measurement\"] == \"mem\")\n |> filter(fn: (r) => r[\"_field\"] == \"used_percent\")\n |> filter(fn: (r) => r[\"host\"] == \"host1\")\n |> yield(name: \"mean\")\n '''\n assert test_database.query(query)[0].records[0].get_value() == 23.43234543\n\ndef test_faulty_db(test_faulty_database):\n with pytest.raises(Exception):\n data = \"mem,host=host1 used_percent=23.43234543\"\n test_faulty_database.write(data)\n\n", "repo_name": "jonakr/DBproject", "sub_path": "src/tests/test_influx.py", "file_name": "test_influx.py", "file_ext": "py", "file_size_in_byte": 1372, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "73", "api": [{"api_name": "influx.Influx", "line_number": 13, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 5, "usage_type": "attribute"}, {"api_name": "influx.Influx", "line_number": 23, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 15, "usage_type": "attribute"}, {"api_name": "pytest.raises", "line_number": 40, "usage_type": "call"}]}
+{"seq_id": "36246390708", "text": "import asyncio\nfrom datetime import timedelta\nimport logging\nfrom ssl import SSLSession\n\nfrom requests.exceptions import SSLError\n\nfrom homeassistant.config_entries import ConfigEntry\nfrom homeassistant.const import CONF_PASSWORD, CONF_USERNAME\nfrom homeassistant.core import HomeAssistant\nfrom homeassistant.exceptions import ConfigEntryNotReady\nfrom homeassistant.helpers.aiohttp_client import async_create_clientsession\nfrom homeassistant.helpers.update_coordinator import DataUpdateCoordinator\n\nfrom .smarthome import smarthome\nfrom .const import (\n DATA_CLIENT,\n DATA_COORDINATOR,\n DOMAIN,\n DEFAULT_NAME,\n PLATFORMS,\n UPDATE_INTERVAL,\n)\n\n_LOGGER: logging.Logger = logging.getLogger(__package__)\n\n\nasync def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:\n \"\"\"Set up this integration using UI.\"\"\"\n if hass.data.get(DOMAIN) is None:\n hass.data.setdefault(DOMAIN, {})\n\n username = entry.data.get(CONF_USERNAME)\n password = entry.data.get(CONF_PASSWORD)\n session = async_create_clientsession(hass, auto_cleanup=True, verify_ssl=False)\n client = smarthome(session, username, password)\n await client.login()\n coordinator = DataUpdateCoordinator(\n hass,\n _LOGGER,\n name=DEFAULT_NAME,\n update_method=client.get_devices,\n update_interval=timedelta(seconds=UPDATE_INTERVAL),\n )\n\n await coordinator.async_refresh()\n\n if not coordinator.last_update_success:\n raise ConfigEntryNotReady\n\n hass.data[DOMAIN][entry.entry_id] = {\n DATA_CLIENT: client,\n DATA_COORDINATOR: coordinator,\n }\n\n for platform in PLATFORMS:\n hass.async_create_task(\n hass.config_entries.async_forward_entry_setup(entry, platform)\n )\n\n entry.add_update_listener(async_reload_entry)\n return True\n\n\nasync def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:\n \"\"\"Handle removal of an entry.\"\"\"\n unloaded = all(\n await asyncio.gather(\n *[\n hass.config_entries.async_forward_entry_unload(entry, platform)\n for platform in PLATFORMS\n ]\n )\n )\n if unloaded:\n hass.data[DOMAIN].pop(entry.entry_id)\n\n return unloaded\n\n\nasync def async_reload_entry(hass: HomeAssistant, entry: ConfigEntry) -> None:\n \"\"\"Reload config entry.\"\"\"\n await async_unload_entry(hass, entry)\n await async_setup_entry(hass, entry)\n", "repo_name": "n71154plus/Hitachi_smart_app", "sub_path": "custom_components/Hitachi_smart_app/__init__.py", "file_name": "__init__.py", "file_ext": "py", "file_size_in_byte": 2446, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 45, "dataset": "github-code", "pt": "73", "api": [{"api_name": "logging.Logger", "line_number": 25, "usage_type": "attribute"}, {"api_name": "logging.getLogger", "line_number": 25, "usage_type": "call"}, {"api_name": "homeassistant.core.HomeAssistant", "line_number": 28, "usage_type": "name"}, {"api_name": "homeassistant.config_entries.ConfigEntry", "line_number": 28, "usage_type": "name"}, {"api_name": "const.DOMAIN", "line_number": 30, "usage_type": "argument"}, {"api_name": "const.DOMAIN", "line_number": 31, "usage_type": "argument"}, {"api_name": "homeassistant.const.CONF_USERNAME", "line_number": 33, "usage_type": "argument"}, {"api_name": "homeassistant.const.CONF_PASSWORD", "line_number": 34, "usage_type": "argument"}, {"api_name": "homeassistant.helpers.aiohttp_client.async_create_clientsession", "line_number": 35, "usage_type": "call"}, {"api_name": "smarthome.smarthome", "line_number": 36, "usage_type": "call"}, {"api_name": 
"homeassistant.helpers.update_coordinator.DataUpdateCoordinator", "line_number": 38, "usage_type": "call"}, {"api_name": "const.DEFAULT_NAME", "line_number": 41, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 43, "usage_type": "call"}, {"api_name": "const.UPDATE_INTERVAL", "line_number": 43, "usage_type": "name"}, {"api_name": "homeassistant.exceptions.ConfigEntryNotReady", "line_number": 49, "usage_type": "name"}, {"api_name": "const.DOMAIN", "line_number": 51, "usage_type": "name"}, {"api_name": "const.DATA_CLIENT", "line_number": 52, "usage_type": "name"}, {"api_name": "const.DATA_COORDINATOR", "line_number": 53, "usage_type": "name"}, {"api_name": "const.PLATFORMS", "line_number": 56, "usage_type": "name"}, {"api_name": "homeassistant.core.HomeAssistant", "line_number": 65, "usage_type": "name"}, {"api_name": "homeassistant.config_entries.ConfigEntry", "line_number": 65, "usage_type": "name"}, {"api_name": "asyncio.gather", "line_number": 68, "usage_type": "call"}, {"api_name": "const.PLATFORMS", "line_number": 71, "usage_type": "name"}, {"api_name": "const.DOMAIN", "line_number": 76, "usage_type": "name"}, {"api_name": "homeassistant.core.HomeAssistant", "line_number": 81, "usage_type": "name"}, {"api_name": "homeassistant.config_entries.ConfigEntry", "line_number": 81, "usage_type": "name"}]}
+{"seq_id": "1897038424", "text": "import collections\nimport errno\nimport getpass\nimport glob\nimport os\nimport shutil\nimport socket\nimport stat\nimport sys\nimport tempfile\nimport traceback\nfrom contextlib import contextmanager\nfrom multiprocessing import Process, Queue\n\nimport pytest\n\nimport llnl.util.lock as lk\nimport llnl.util.multiproc as mp\nfrom llnl.util.filesystem import getuid, touch\n\nif sys.platform != \"win32\":\n import fcntl\n\npytestmark = pytest.mark.not_on_windows(\"does not run on windows\")\n\n\n#\n# This test can be run with MPI. MPI is \"enabled\" if we can import\n# mpi4py and the number of total MPI processes is greater than 1.\n# Otherwise it just runs as a node-local test.\n#\n# NOTE: MPI mode is different from node-local mode in that node-local\n# mode will spawn its own test processes, while MPI mode assumes you've\n# run this script as a SPMD application. In MPI mode, no additional\n# processes are spawned, and you need to ensure that you mpirun the\n# script with enough processes for all the multiproc_test cases below.\n#\n# If you don't run with enough processes, tests that require more\n# processes than you currently have will be skipped.\n#\nmpi = False\ncomm = None\ntry:\n from mpi4py import MPI\n\n comm = MPI.COMM_WORLD\n if comm.size > 1:\n mpi = True\nexcept ImportError:\n pass\n\n\n\"\"\"This is a list of filesystem locations to test locks in. Paths are\nexpanded so that %u is replaced with the current username. '~' is also\nlegal and will be expanded to the user's home directory.\n\nTests are skipped for directories that don't exist, so you'll need to\nupdate this with the locations of NFS, Lustre, and other mounts on your\nsystem.\n\"\"\"\nlocations = [\n tempfile.gettempdir(),\n os.path.join(\"/nfs/tmp2/\", getpass.getuser()),\n os.path.join(\"/p/lscratch*/\", getpass.getuser()),\n]\n\n\"\"\"This is the longest a failed multiproc test will take.\nBarriers will time out and raise an exception after this interval.\nIn MPI mode, barriers don't time out (they hang). See mpi_multiproc_test.\n\"\"\"\nbarrier_timeout = 5\n\n\"\"\"This is the lock timeout for expected failures.\nThis may need to be higher for some filesystems.\"\"\"\nlock_fail_timeout = 0.1\n\n\ndef make_readable(*paths):\n # TODO: From os.chmod doc:\n # \"Note Although Windows supports chmod(), you can only\n # set the file's read-only flag with it (via the stat.S_IWRITE and\n # stat.S_IREAD constants or a corresponding integer value). 
All other\n    # bits are ignored.\"\n    for path in paths:\n        if sys.platform != \"win32\":\n            mode = 0o555 if os.path.isdir(path) else 0o444\n        else:\n            mode = stat.S_IREAD\n        os.chmod(path, mode)\n\n\ndef make_writable(*paths):\n    for path in paths:\n        if sys.platform != \"win32\":\n            mode = 0o755 if os.path.isdir(path) else 0o744\n        else:\n            mode = stat.S_IWRITE\n        os.chmod(path, mode)\n\n\n@contextmanager\ndef read_only(*paths):\n    modes = [os.stat(p).st_mode for p in paths]\n    make_readable(*paths)\n\n    yield\n\n    for path, mode in zip(paths, modes):\n        os.chmod(path, mode)\n\n\n@pytest.fixture(scope=\"session\", params=locations)\ndef lock_test_directory(request):\n    \"\"\"This fixture causes tests to be executed for many different mounts.\n\n    See the ``locations`` list above for details.\n    \"\"\"\n    return request.param\n\n\n@pytest.fixture(scope=\"session\")\ndef lock_dir(lock_test_directory):\n    parent = next(\n        (p for p in glob.glob(lock_test_directory) if os.path.exists(p) and os.access(p, os.W_OK)),\n        None,\n    )\n    if not parent:\n        # Skip filesystems that don't exist or aren't writable\n        pytest.skip(\"requires filesystem: '%s'\" % lock_test_directory)\n    elif mpi and parent == tempfile.gettempdir():\n        # Skip local tmp test for MPI runs\n        pytest.skip(\"skipping local tmp directory for MPI test.\")\n\n    tempdir = None\n    if not mpi or comm.rank == 0:\n        tempdir = tempfile.mkdtemp(dir=parent)\n    if mpi:\n        tempdir = comm.bcast(tempdir)\n\n    yield tempdir\n\n    if mpi:\n        # rank 0 may get here before others, in which case it'll try to\n        # remove the directory while other processes try to re-create the\n        # lock. This will give errno 39: directory not empty. Use a\n        # barrier to ensure everyone is done first.\n        comm.barrier()\n\n    if not mpi or comm.rank == 0:\n        make_writable(tempdir)\n        shutil.rmtree(tempdir)\n\n\n@pytest.fixture\ndef private_lock_path(lock_dir):\n    \"\"\"In MPI mode, this is a private lock for each rank in a multiproc test.\n\n    For other modes, it is the same as a shared lock.\n    \"\"\"\n    lock_file = os.path.join(lock_dir, \"lockfile\")\n    if mpi:\n        lock_file += \".%s\" % comm.rank\n\n    yield lock_file\n\n    if os.path.exists(lock_file):\n        make_writable(lock_dir, lock_file)\n        os.unlink(lock_file)\n\n\n@pytest.fixture\ndef lock_path(lock_dir):\n    \"\"\"This lock is shared among all processes in a multiproc test.\"\"\"\n    lock_file = os.path.join(lock_dir, \"lockfile\")\n\n    yield lock_file\n\n    if os.path.exists(lock_file):\n        make_writable(lock_dir, lock_file)\n        os.unlink(lock_file)\n\n\ndef test_poll_interval_generator():\n    interval_iter = iter(lk.Lock._poll_interval_generator(_wait_times=[1, 2, 3]))\n    intervals = list(next(interval_iter) for i in range(100))\n    assert intervals == [1] * 20 + [2] * 40 + [3] * 40\n\n\ndef local_multiproc_test(*functions, **kwargs):\n    \"\"\"Order some processes using simple barrier synchronization.\"\"\"\n    b = mp.Barrier(len(functions), timeout=barrier_timeout)\n\n    args = (b,) + tuple(kwargs.get(\"extra_args\", ()))\n    procs = [Process(target=f, args=args, name=f.__name__) for f in functions]\n\n    for p in procs:\n        p.start()\n\n    for p in procs:\n        p.join()\n\n    assert all(p.exitcode == 0 for p in procs)\n\n\ndef mpi_multiproc_test(*functions):\n    \"\"\"SPMD version of multiproc test.\n\n    This needs to be run like so:\n\n        srun spack test lock\n\n    Each process executes its corresponding function. This is different\n    from ``multiproc_test`` above, which spawns the processes. 
This will\n    skip tests if there are too few processes to run them.\n    \"\"\"\n    procs = len(functions)\n    if procs > comm.size:\n        pytest.skip(\"requires at least %d MPI processes\" % procs)\n\n    comm.Barrier()  # barrier before each MPI test\n\n    include = comm.rank < len(functions)\n    subcomm = comm.Split(include)\n\n    class subcomm_barrier:\n        \"\"\"Stand-in for multiproc barrier for MPI-parallel jobs.\"\"\"\n\n        def wait(self):\n            subcomm.Barrier()\n\n    if include:\n        try:\n            functions[subcomm.rank](subcomm_barrier())\n        except BaseException:\n            # aborting is the best we can do for MPI tests without\n            # hanging, since we're using MPI barriers. This will fail\n            # early and it loses the nice pytest output, but at least it\n            # gets us a stacktrace on the processes that failed.\n            traceback.print_exc()\n            comm.Abort()\n        subcomm.Free()\n\n    comm.Barrier()  # barrier after each MPI test.\n\n\n\"\"\"``multiproc_test()`` should be called by tests below.\n``multiproc_test()`` will work for either MPI runs or for local runs.\n\"\"\"\nmultiproc_test = mpi_multiproc_test if mpi else local_multiproc_test\n\n\n#\n# Process snippets below can be composed into tests.\n#\nclass AcquireWrite:\n    def __init__(self, lock_path, start=0, length=0):\n        self.lock_path = lock_path\n        self.start = start\n        self.length = length\n\n    @property\n    def __name__(self):\n        return self.__class__.__name__\n\n    def __call__(self, barrier):\n        lock = lk.Lock(self.lock_path, start=self.start, length=self.length)\n        lock.acquire_write() # grab exclusive lock\n        barrier.wait()\n        barrier.wait() # hold the lock until timeout in other procs.\n\n\nclass AcquireRead:\n    def __init__(self, lock_path, start=0, length=0):\n        self.lock_path = lock_path\n        self.start = start\n        self.length = length\n\n    @property\n    def __name__(self):\n        return self.__class__.__name__\n\n    def __call__(self, barrier):\n        lock = lk.Lock(self.lock_path, start=self.start, length=self.length)\n        lock.acquire_read() # grab shared lock\n        barrier.wait()\n        barrier.wait() # hold the lock until timeout in other procs.\n\n\nclass TimeoutWrite:\n    def __init__(self, lock_path, start=0, length=0):\n        self.lock_path = lock_path\n        self.start = start\n        self.length = length\n\n    @property\n    def __name__(self):\n        return self.__class__.__name__\n\n    def __call__(self, barrier):\n        lock = lk.Lock(self.lock_path, start=self.start, length=self.length)\n        barrier.wait() # wait for lock acquire in first process\n        with pytest.raises(lk.LockTimeoutError):\n            lock.acquire_write(lock_fail_timeout)\n        barrier.wait()\n\n\nclass TimeoutRead:\n    def __init__(self, lock_path, start=0, length=0):\n        self.lock_path = lock_path\n        self.start = start\n        self.length = length\n\n    @property\n    def __name__(self):\n        return self.__class__.__name__\n\n    def __call__(self, barrier):\n        lock = lk.Lock(self.lock_path, start=self.start, length=self.length)\n        barrier.wait() # wait for lock acquire in first process\n        with pytest.raises(lk.LockTimeoutError):\n            lock.acquire_read(lock_fail_timeout)\n        barrier.wait()\n\n\n#\n# Test that exclusive locks on other processes time out when an\n# exclusive lock is held.\n#\ndef test_write_lock_timeout_on_write(lock_path):\n    multiproc_test(AcquireWrite(lock_path), TimeoutWrite(lock_path))\n\n\ndef test_write_lock_timeout_on_write_2(lock_path):\n    multiproc_test(AcquireWrite(lock_path), TimeoutWrite(lock_path), TimeoutWrite(lock_path))\n\n\ndef test_write_lock_timeout_on_write_3(lock_path):\n    multiproc_test(\n        AcquireWrite(lock_path),\n        TimeoutWrite(lock_path),\n        TimeoutWrite(lock_path),\n        TimeoutWrite(lock_path),\n    
)\n\n\ndef test_write_lock_timeout_on_write_ranges(lock_path):\n    multiproc_test(AcquireWrite(lock_path, 0, 1), TimeoutWrite(lock_path, 0, 1))\n\n\ndef test_write_lock_timeout_on_write_ranges_2(lock_path):\n    multiproc_test(\n        AcquireWrite(lock_path, 0, 64),\n        AcquireWrite(lock_path, 65, 1),\n        TimeoutWrite(lock_path, 0, 1),\n        TimeoutWrite(lock_path, 63, 1),\n    )\n\n\ndef test_write_lock_timeout_on_write_ranges_3(lock_path):\n    multiproc_test(\n        AcquireWrite(lock_path, 0, 1),\n        AcquireWrite(lock_path, 1, 1),\n        TimeoutWrite(lock_path),\n        TimeoutWrite(lock_path),\n        TimeoutWrite(lock_path),\n    )\n\n\ndef test_write_lock_timeout_on_write_ranges_4(lock_path):\n    multiproc_test(\n        AcquireWrite(lock_path, 0, 1),\n        AcquireWrite(lock_path, 1, 1),\n        AcquireWrite(lock_path, 2, 456),\n        AcquireWrite(lock_path, 500, 64),\n        TimeoutWrite(lock_path),\n        TimeoutWrite(lock_path),\n        TimeoutWrite(lock_path),\n    )\n\n\n#\n# Test that shared locks on other processes time out when an\n# exclusive lock is held.\n#\ndef test_read_lock_timeout_on_write(lock_path):\n    multiproc_test(AcquireWrite(lock_path), TimeoutRead(lock_path))\n\n\ndef test_read_lock_timeout_on_write_2(lock_path):\n    multiproc_test(AcquireWrite(lock_path), TimeoutRead(lock_path), TimeoutRead(lock_path))\n\n\ndef test_read_lock_timeout_on_write_3(lock_path):\n    multiproc_test(\n        AcquireWrite(lock_path),\n        TimeoutRead(lock_path),\n        TimeoutRead(lock_path),\n        TimeoutRead(lock_path),\n    )\n\n\ndef test_read_lock_timeout_on_write_ranges(lock_path):\n    \"\"\"small write lock, read whole file.\"\"\"\n    multiproc_test(AcquireWrite(lock_path, 0, 1), TimeoutRead(lock_path))\n\n\ndef test_read_lock_timeout_on_write_ranges_2(lock_path):\n    \"\"\"small write lock, small read lock\"\"\"\n    multiproc_test(AcquireWrite(lock_path, 0, 1), TimeoutRead(lock_path, 0, 1))\n\n\ndef test_read_lock_timeout_on_write_ranges_3(lock_path):\n    \"\"\"two write locks, overlapping read locks\"\"\"\n    multiproc_test(\n        AcquireWrite(lock_path, 0, 1),\n        AcquireWrite(lock_path, 64, 128),\n        TimeoutRead(lock_path, 0, 1),\n        TimeoutRead(lock_path, 128, 256),\n    )\n\n\n#\n# Test that exclusive locks time out when shared locks are held.\n#\ndef test_write_lock_timeout_on_read(lock_path):\n    multiproc_test(AcquireRead(lock_path), TimeoutWrite(lock_path))\n\n\ndef test_write_lock_timeout_on_read_2(lock_path):\n    multiproc_test(AcquireRead(lock_path), TimeoutWrite(lock_path), TimeoutWrite(lock_path))\n\n\ndef test_write_lock_timeout_on_read_3(lock_path):\n    multiproc_test(\n        AcquireRead(lock_path),\n        TimeoutWrite(lock_path),\n        TimeoutWrite(lock_path),\n        TimeoutWrite(lock_path),\n    )\n\n\ndef test_write_lock_timeout_on_read_ranges(lock_path):\n    multiproc_test(AcquireRead(lock_path, 0, 1), TimeoutWrite(lock_path))\n\n\ndef test_write_lock_timeout_on_read_ranges_2(lock_path):\n    multiproc_test(AcquireRead(lock_path, 0, 1), TimeoutWrite(lock_path, 0, 1))\n\n\ndef test_write_lock_timeout_on_read_ranges_3(lock_path):\n    multiproc_test(\n        AcquireRead(lock_path, 0, 1),\n        AcquireRead(lock_path, 10, 1),\n        TimeoutWrite(lock_path, 0, 1),\n        TimeoutWrite(lock_path, 10, 1),\n    )\n\n\ndef test_write_lock_timeout_on_read_ranges_4(lock_path):\n    multiproc_test(\n        AcquireRead(lock_path, 0, 64),\n        TimeoutWrite(lock_path, 10, 1),\n        TimeoutWrite(lock_path, 32, 1),\n    )\n\n\ndef test_write_lock_timeout_on_read_ranges_5(lock_path):\n    multiproc_test(\n        AcquireRead(lock_path, 64, 128),\n        TimeoutWrite(lock_path, 65, 1),\n        TimeoutWrite(lock_path, 127, 1),\n        TimeoutWrite(lock_path, 90, 10),\n    )\n\n\n#\n# Test that exclusive locks time out while lots of 
shared locks are held.\n#\ndef test_write_lock_timeout_with_multiple_readers_2_1(lock_path):\n    multiproc_test(AcquireRead(lock_path), AcquireRead(lock_path), TimeoutWrite(lock_path))\n\n\ndef test_write_lock_timeout_with_multiple_readers_2_2(lock_path):\n    multiproc_test(\n        AcquireRead(lock_path),\n        AcquireRead(lock_path),\n        TimeoutWrite(lock_path),\n        TimeoutWrite(lock_path),\n    )\n\n\ndef test_write_lock_timeout_with_multiple_readers_3_1(lock_path):\n    multiproc_test(\n        AcquireRead(lock_path),\n        AcquireRead(lock_path),\n        AcquireRead(lock_path),\n        TimeoutWrite(lock_path),\n    )\n\n\ndef test_write_lock_timeout_with_multiple_readers_3_2(lock_path):\n    multiproc_test(\n        AcquireRead(lock_path),\n        AcquireRead(lock_path),\n        AcquireRead(lock_path),\n        TimeoutWrite(lock_path),\n        TimeoutWrite(lock_path),\n    )\n\n\ndef test_write_lock_timeout_with_multiple_readers_2_1_ranges(lock_path):\n    multiproc_test(\n        AcquireRead(lock_path, 0, 10), AcquireRead(lock_path, 2, 10), TimeoutWrite(lock_path, 5, 5)\n    )\n\n\ndef test_write_lock_timeout_with_multiple_readers_2_3_ranges(lock_path):\n    multiproc_test(\n        AcquireRead(lock_path, 0, 10),\n        AcquireRead(lock_path, 5, 15),\n        TimeoutWrite(lock_path, 0, 1),\n        TimeoutWrite(lock_path, 11, 3),\n        TimeoutWrite(lock_path, 7, 1),\n    )\n\n\ndef test_write_lock_timeout_with_multiple_readers_3_1_ranges(lock_path):\n    multiproc_test(\n        AcquireRead(lock_path, 0, 5),\n        AcquireRead(lock_path, 5, 5),\n        AcquireRead(lock_path, 10, 5),\n        TimeoutWrite(lock_path, 0, 15),\n    )\n\n\ndef test_write_lock_timeout_with_multiple_readers_3_2_ranges(lock_path):\n    multiproc_test(\n        AcquireRead(lock_path, 0, 5),\n        AcquireRead(lock_path, 5, 5),\n        AcquireRead(lock_path, 10, 5),\n        TimeoutWrite(lock_path, 3, 10),\n        TimeoutWrite(lock_path, 5, 1),\n    )\n\n\n@pytest.mark.skipif(getuid() == 0, reason=\"user is root\")\ndef test_read_lock_on_read_only_lockfile(lock_dir, lock_path):\n    \"\"\"read-only directory, read-only lockfile.\"\"\"\n    touch(lock_path)\n    with read_only(lock_path, lock_dir):\n        lock = lk.Lock(lock_path)\n\n        with lk.ReadTransaction(lock):\n            pass\n\n        with pytest.raises(lk.LockROFileError):\n            with lk.WriteTransaction(lock):\n                pass\n\n\ndef test_read_lock_read_only_dir_writable_lockfile(lock_dir, lock_path):\n    \"\"\"read-only directory, writable lockfile.\"\"\"\n    touch(lock_path)\n    with read_only(lock_dir):\n        lock = lk.Lock(lock_path)\n\n        with lk.ReadTransaction(lock):\n            pass\n\n        with lk.WriteTransaction(lock):\n            pass\n\n\n@pytest.mark.skipif(False if sys.platform == \"win32\" else getuid() == 0, reason=\"user is root\")\ndef test_read_lock_no_lockfile(lock_dir, lock_path):\n    \"\"\"read-only directory, no lockfile (so can't create).\"\"\"\n    with read_only(lock_dir):\n        lock = lk.Lock(lock_path)\n\n        with pytest.raises(lk.CantCreateLockError):\n            with lk.ReadTransaction(lock):\n                pass\n\n        with pytest.raises(lk.CantCreateLockError):\n            with lk.WriteTransaction(lock):\n                pass\n\n\ndef test_upgrade_read_to_write(private_lock_path):\n    \"\"\"Test that a read lock can be upgraded to a write lock.\n\n    Note that to upgrade a read lock to a write lock, you have to be the\n    only holder of a read lock. Client code needs to coordinate that for\n    shared locks. 
For this test, we use a private lock just to test that an\n upgrade is possible.\n \"\"\"\n # ensure lock file exists the first time, so we open it read-only\n # to begin with.\n touch(private_lock_path)\n\n lock = lk.Lock(private_lock_path)\n assert lock._reads == 0\n assert lock._writes == 0\n\n lock.acquire_read()\n assert lock._reads == 1\n assert lock._writes == 0\n assert lock._file.mode == \"r+\"\n\n lock.acquire_write()\n assert lock._reads == 1\n assert lock._writes == 1\n assert lock._file.mode == \"r+\"\n\n lock.release_write()\n assert lock._reads == 1\n assert lock._writes == 0\n assert lock._file.mode == \"r+\"\n\n lock.release_read()\n assert lock._reads == 0\n assert lock._writes == 0\n assert lock._file is None\n\n\ndef test_upgrade_read_to_write_fails_with_readonly_file(private_lock_path):\n \"\"\"Test that read-only file can be read-locked but not write-locked.\"\"\"\n # ensure lock file exists the first time\n touch(private_lock_path)\n\n # open it read-only to begin with.\n with read_only(private_lock_path):\n lock = lk.Lock(private_lock_path)\n assert lock._reads == 0\n assert lock._writes == 0\n\n lock.acquire_read()\n assert lock._reads == 1\n assert lock._writes == 0\n assert lock._file.mode == \"r\"\n\n # upgrade to write here\n with pytest.raises(lk.LockROFileError):\n lock.acquire_write()\n\n # TODO: lk.FILE_TRACKER does not release private_lock_path\n lk.FILE_TRACKER.release_by_stat(os.stat(private_lock_path))\n\n\nclass ComplexAcquireAndRelease:\n def __init__(self, lock_path):\n self.lock_path = lock_path\n\n def p1(self, barrier):\n lock = lk.Lock(self.lock_path)\n\n lock.acquire_write()\n barrier.wait() # ---------------------------------------- 1\n # others test timeout\n barrier.wait() # ---------------------------------------- 2\n lock.release_write() # release and others acquire read\n barrier.wait() # ---------------------------------------- 3\n with pytest.raises(lk.LockTimeoutError):\n lock.acquire_write(lock_fail_timeout)\n lock.acquire_read()\n barrier.wait() # ---------------------------------------- 4\n lock.release_read()\n barrier.wait() # ---------------------------------------- 5\n\n # p2 upgrades read to write\n barrier.wait() # ---------------------------------------- 6\n with pytest.raises(lk.LockTimeoutError):\n lock.acquire_write(lock_fail_timeout)\n with pytest.raises(lk.LockTimeoutError):\n lock.acquire_read(lock_fail_timeout)\n barrier.wait() # ---------------------------------------- 7\n # p2 releases write and read\n barrier.wait() # ---------------------------------------- 8\n\n # p3 acquires read\n barrier.wait() # ---------------------------------------- 9\n # p3 upgrades read to write\n barrier.wait() # ---------------------------------------- 10\n with pytest.raises(lk.LockTimeoutError):\n lock.acquire_write(lock_fail_timeout)\n with pytest.raises(lk.LockTimeoutError):\n lock.acquire_read(lock_fail_timeout)\n barrier.wait() # ---------------------------------------- 11\n # p3 releases locks\n barrier.wait() # ---------------------------------------- 12\n lock.acquire_read()\n barrier.wait() # ---------------------------------------- 13\n lock.release_read()\n\n def p2(self, barrier):\n lock = lk.Lock(self.lock_path)\n\n # p1 acquires write\n barrier.wait() # ---------------------------------------- 1\n with pytest.raises(lk.LockTimeoutError):\n lock.acquire_write(lock_fail_timeout)\n with pytest.raises(lk.LockTimeoutError):\n lock.acquire_read(lock_fail_timeout)\n barrier.wait() # ---------------------------------------- 2\n 
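# p1 releases its write lock right after barrier 2, so this shared\n        # acquire only blocks until that release lands\n        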
lock.acquire_read()\n barrier.wait() # ---------------------------------------- 3\n # p1 tests shared read\n barrier.wait() # ---------------------------------------- 4\n # others release reads\n barrier.wait() # ---------------------------------------- 5\n\n lock.acquire_write() # upgrade read to write\n barrier.wait() # ---------------------------------------- 6\n # others test timeout\n barrier.wait() # ---------------------------------------- 7\n lock.release_write() # release read AND write (need both)\n lock.release_read()\n barrier.wait() # ---------------------------------------- 8\n\n # p3 acquires read\n barrier.wait() # ---------------------------------------- 9\n # p3 upgrades read to write\n barrier.wait() # ---------------------------------------- 10\n with pytest.raises(lk.LockTimeoutError):\n lock.acquire_write(lock_fail_timeout)\n with pytest.raises(lk.LockTimeoutError):\n lock.acquire_read(lock_fail_timeout)\n barrier.wait() # ---------------------------------------- 11\n # p3 releases locks\n barrier.wait() # ---------------------------------------- 12\n lock.acquire_read()\n barrier.wait() # ---------------------------------------- 13\n lock.release_read()\n\n def p3(self, barrier):\n lock = lk.Lock(self.lock_path)\n\n # p1 acquires write\n barrier.wait() # ---------------------------------------- 1\n with pytest.raises(lk.LockTimeoutError):\n lock.acquire_write(lock_fail_timeout)\n with pytest.raises(lk.LockTimeoutError):\n lock.acquire_read(lock_fail_timeout)\n barrier.wait() # ---------------------------------------- 2\n lock.acquire_read()\n barrier.wait() # ---------------------------------------- 3\n # p1 tests shared read\n barrier.wait() # ---------------------------------------- 4\n lock.release_read()\n barrier.wait() # ---------------------------------------- 5\n\n # p2 upgrades read to write\n barrier.wait() # ---------------------------------------- 6\n with pytest.raises(lk.LockTimeoutError):\n lock.acquire_write(lock_fail_timeout)\n with pytest.raises(lk.LockTimeoutError):\n lock.acquire_read(lock_fail_timeout)\n barrier.wait() # ---------------------------------------- 7\n # p2 releases write & read\n barrier.wait() # ---------------------------------------- 8\n\n lock.acquire_read()\n barrier.wait() # ---------------------------------------- 9\n lock.acquire_write()\n barrier.wait() # ---------------------------------------- 10\n # others test timeout\n barrier.wait() # ---------------------------------------- 11\n lock.release_read() # release read AND write in opposite\n lock.release_write() # order from before on p2\n barrier.wait() # ---------------------------------------- 12\n lock.acquire_read()\n barrier.wait() # ---------------------------------------- 13\n lock.release_read()\n\n\n#\n# Longer test case that ensures locks are reusable. 
Ordering is\n# enforced by barriers throughout -- steps are shown with numbers.\n#\ndef test_complex_acquire_and_release_chain(lock_path):\n test_chain = ComplexAcquireAndRelease(lock_path)\n multiproc_test(test_chain.p1, test_chain.p2, test_chain.p3)\n\n\nclass AssertLock(lk.Lock):\n \"\"\"Test lock class that marks acquire/release events.\"\"\"\n\n def __init__(self, lock_path, vals):\n super().__init__(lock_path)\n self.vals = vals\n\n # assert hooks for subclasses\n assert_acquire_read = lambda self: None\n assert_acquire_write = lambda self: None\n assert_release_read = lambda self: None\n assert_release_write = lambda self: None\n\n def acquire_read(self, timeout=None):\n self.assert_acquire_read()\n result = super().acquire_read(timeout)\n self.vals[\"acquired_read\"] = True\n return result\n\n def acquire_write(self, timeout=None):\n self.assert_acquire_write()\n result = super().acquire_write(timeout)\n self.vals[\"acquired_write\"] = True\n return result\n\n def release_read(self, release_fn=None):\n self.assert_release_read()\n result = super().release_read(release_fn)\n self.vals[\"released_read\"] = True\n return result\n\n def release_write(self, release_fn=None):\n self.assert_release_write()\n result = super().release_write(release_fn)\n self.vals[\"released_write\"] = True\n return result\n\n\n@pytest.mark.parametrize(\n \"transaction,type\", [(lk.ReadTransaction, \"read\"), (lk.WriteTransaction, \"write\")]\n)\ndef test_transaction(lock_path, transaction, type):\n class MockLock(AssertLock):\n def assert_acquire_read(self):\n assert not vals[\"entered_fn\"]\n assert not vals[\"exited_fn\"]\n\n def assert_release_read(self):\n assert vals[\"entered_fn\"]\n assert not vals[\"exited_fn\"]\n\n def assert_acquire_write(self):\n assert not vals[\"entered_fn\"]\n assert not vals[\"exited_fn\"]\n\n def assert_release_write(self):\n assert vals[\"entered_fn\"]\n assert not vals[\"exited_fn\"]\n\n def enter_fn():\n # assert enter_fn is called while lock is held\n assert vals[\"acquired_%s\" % type]\n vals[\"entered_fn\"] = True\n\n def exit_fn(t, v, tb):\n # assert exit_fn is called while lock is held\n assert not vals[\"released_%s\" % type]\n vals[\"exited_fn\"] = True\n vals[\"exception\"] = t or v or tb\n\n vals = collections.defaultdict(lambda: False)\n lock = MockLock(lock_path, vals)\n\n with transaction(lock, acquire=enter_fn, release=exit_fn):\n assert vals[\"acquired_%s\" % type]\n assert not vals[\"released_%s\" % type]\n\n assert vals[\"entered_fn\"]\n assert vals[\"exited_fn\"]\n assert vals[\"acquired_%s\" % type]\n assert vals[\"released_%s\" % type]\n assert not vals[\"exception\"]\n\n\n@pytest.mark.parametrize(\n \"transaction,type\", [(lk.ReadTransaction, \"read\"), (lk.WriteTransaction, \"write\")]\n)\ndef test_transaction_with_exception(lock_path, transaction, type):\n class MockLock(AssertLock):\n def assert_acquire_read(self):\n assert not vals[\"entered_fn\"]\n assert not vals[\"exited_fn\"]\n\n def assert_release_read(self):\n assert vals[\"entered_fn\"]\n assert not vals[\"exited_fn\"]\n\n def assert_acquire_write(self):\n assert not vals[\"entered_fn\"]\n assert not vals[\"exited_fn\"]\n\n def assert_release_write(self):\n assert vals[\"entered_fn\"]\n assert not vals[\"exited_fn\"]\n\n def enter_fn():\n assert vals[\"acquired_%s\" % type]\n vals[\"entered_fn\"] = True\n\n def exit_fn(t, v, tb):\n assert not vals[\"released_%s\" % type]\n vals[\"exited_fn\"] = True\n vals[\"exception\"] = t or v or tb\n return exit_result\n\n exit_result = False\n vals = 
collections.defaultdict(lambda: False)\n lock = MockLock(lock_path, vals)\n\n with pytest.raises(Exception):\n with transaction(lock, acquire=enter_fn, release=exit_fn):\n raise Exception()\n\n assert vals[\"entered_fn\"]\n assert vals[\"exited_fn\"]\n assert vals[\"exception\"]\n\n # test suppression of exceptions from exit_fn\n exit_result = True\n vals.clear()\n\n # should not raise now.\n with transaction(lock, acquire=enter_fn, release=exit_fn):\n raise Exception()\n\n assert vals[\"entered_fn\"]\n assert vals[\"exited_fn\"]\n assert vals[\"exception\"]\n\n\n@pytest.mark.parametrize(\n \"transaction,type\", [(lk.ReadTransaction, \"read\"), (lk.WriteTransaction, \"write\")]\n)\ndef test_transaction_with_context_manager(lock_path, transaction, type):\n class MockLock(AssertLock):\n def assert_acquire_read(self):\n assert not vals[\"entered_ctx\"]\n assert not vals[\"exited_ctx\"]\n\n def assert_release_read(self):\n assert vals[\"entered_ctx\"]\n assert vals[\"exited_ctx\"]\n\n def assert_acquire_write(self):\n assert not vals[\"entered_ctx\"]\n assert not vals[\"exited_ctx\"]\n\n def assert_release_write(self):\n assert vals[\"entered_ctx\"]\n assert vals[\"exited_ctx\"]\n\n class TestContextManager:\n def __enter__(self):\n vals[\"entered_ctx\"] = True\n\n def __exit__(self, t, v, tb):\n assert not vals[\"released_%s\" % type]\n vals[\"exited_ctx\"] = True\n vals[\"exception_ctx\"] = t or v or tb\n return exit_ctx_result\n\n def exit_fn(t, v, tb):\n assert not vals[\"released_%s\" % type]\n vals[\"exited_fn\"] = True\n vals[\"exception_fn\"] = t or v or tb\n return exit_fn_result\n\n exit_fn_result, exit_ctx_result = False, False\n vals = collections.defaultdict(lambda: False)\n lock = MockLock(lock_path, vals)\n\n with transaction(lock, acquire=TestContextManager, release=exit_fn):\n pass\n\n assert vals[\"entered_ctx\"]\n assert vals[\"exited_ctx\"]\n assert vals[\"exited_fn\"]\n assert not vals[\"exception_ctx\"]\n assert not vals[\"exception_fn\"]\n\n vals.clear()\n with transaction(lock, acquire=TestContextManager):\n pass\n\n assert vals[\"entered_ctx\"]\n assert vals[\"exited_ctx\"]\n assert not vals[\"exited_fn\"]\n assert not vals[\"exception_ctx\"]\n assert not vals[\"exception_fn\"]\n\n # below are tests for exceptions with and without suppression\n def assert_ctx_and_fn_exception(raises=True):\n vals.clear()\n\n if raises:\n with pytest.raises(Exception):\n with transaction(lock, acquire=TestContextManager, release=exit_fn):\n raise Exception()\n else:\n with transaction(lock, acquire=TestContextManager, release=exit_fn):\n raise Exception()\n\n assert vals[\"entered_ctx\"]\n assert vals[\"exited_ctx\"]\n assert vals[\"exited_fn\"]\n assert vals[\"exception_ctx\"]\n assert vals[\"exception_fn\"]\n\n def assert_only_ctx_exception(raises=True):\n vals.clear()\n\n if raises:\n with pytest.raises(Exception):\n with transaction(lock, acquire=TestContextManager):\n raise Exception()\n else:\n with transaction(lock, acquire=TestContextManager):\n raise Exception()\n\n assert vals[\"entered_ctx\"]\n assert vals[\"exited_ctx\"]\n assert not vals[\"exited_fn\"]\n assert vals[\"exception_ctx\"]\n assert not vals[\"exception_fn\"]\n\n # no suppression\n assert_ctx_and_fn_exception(raises=True)\n assert_only_ctx_exception(raises=True)\n\n # suppress exception only in function\n exit_fn_result, exit_ctx_result = True, False\n assert_ctx_and_fn_exception(raises=False)\n assert_only_ctx_exception(raises=True)\n\n # suppress exception only in context\n exit_fn_result, exit_ctx_result = 
False, True\n assert_ctx_and_fn_exception(raises=False)\n assert_only_ctx_exception(raises=False)\n\n # suppress exception in function and context\n exit_fn_result, exit_ctx_result = True, True\n assert_ctx_and_fn_exception(raises=False)\n assert_only_ctx_exception(raises=False)\n\n\ndef test_nested_write_transaction(lock_path):\n \"\"\"Ensure that the outermost write transaction writes.\"\"\"\n\n def write(t, v, tb):\n vals[\"wrote\"] = True\n\n vals = collections.defaultdict(lambda: False)\n lock = AssertLock(lock_path, vals)\n\n # write/write\n with lk.WriteTransaction(lock, release=write):\n assert not vals[\"wrote\"]\n with lk.WriteTransaction(lock, release=write):\n assert not vals[\"wrote\"]\n assert not vals[\"wrote\"]\n assert vals[\"wrote\"]\n\n # read/write\n vals.clear()\n with lk.ReadTransaction(lock):\n assert not vals[\"wrote\"]\n with lk.WriteTransaction(lock, release=write):\n assert not vals[\"wrote\"]\n assert vals[\"wrote\"]\n\n # write/read/write\n vals.clear()\n with lk.WriteTransaction(lock, release=write):\n assert not vals[\"wrote\"]\n with lk.ReadTransaction(lock):\n assert not vals[\"wrote\"]\n with lk.WriteTransaction(lock, release=write):\n assert not vals[\"wrote\"]\n assert not vals[\"wrote\"]\n assert not vals[\"wrote\"]\n assert vals[\"wrote\"]\n\n # read/write/read/write\n vals.clear()\n with lk.ReadTransaction(lock):\n with lk.WriteTransaction(lock, release=write):\n assert not vals[\"wrote\"]\n with lk.ReadTransaction(lock):\n assert not vals[\"wrote\"]\n with lk.WriteTransaction(lock, release=write):\n assert not vals[\"wrote\"]\n assert not vals[\"wrote\"]\n assert not vals[\"wrote\"]\n assert vals[\"wrote\"]\n\n\ndef test_nested_reads(lock_path):\n \"\"\"Ensure that write transactions won't re-read data.\"\"\"\n\n def read():\n vals[\"read\"] += 1\n\n vals = collections.defaultdict(lambda: 0)\n lock = AssertLock(lock_path, vals)\n\n # read/read\n vals.clear()\n assert vals[\"read\"] == 0\n with lk.ReadTransaction(lock, acquire=read):\n assert vals[\"read\"] == 1\n with lk.ReadTransaction(lock, acquire=read):\n assert vals[\"read\"] == 1\n\n # write/write\n vals.clear()\n assert vals[\"read\"] == 0\n with lk.WriteTransaction(lock, acquire=read):\n assert vals[\"read\"] == 1\n with lk.WriteTransaction(lock, acquire=read):\n assert vals[\"read\"] == 1\n\n # read/write\n vals.clear()\n assert vals[\"read\"] == 0\n with lk.ReadTransaction(lock, acquire=read):\n assert vals[\"read\"] == 1\n with lk.WriteTransaction(lock, acquire=read):\n assert vals[\"read\"] == 1\n\n # write/read/write\n vals.clear()\n assert vals[\"read\"] == 0\n with lk.WriteTransaction(lock, acquire=read):\n assert vals[\"read\"] == 1\n with lk.ReadTransaction(lock, acquire=read):\n assert vals[\"read\"] == 1\n with lk.WriteTransaction(lock, acquire=read):\n assert vals[\"read\"] == 1\n\n # read/write/read/write\n vals.clear()\n assert vals[\"read\"] == 0\n with lk.ReadTransaction(lock, acquire=read):\n assert vals[\"read\"] == 1\n with lk.WriteTransaction(lock, acquire=read):\n assert vals[\"read\"] == 1\n with lk.ReadTransaction(lock, acquire=read):\n assert vals[\"read\"] == 1\n with lk.WriteTransaction(lock, acquire=read):\n assert vals[\"read\"] == 1\n\n\nclass LockDebugOutput:\n def __init__(self, lock_path):\n self.lock_path = lock_path\n self.host = socket.gethostname()\n\n def p1(self, barrier, q1, q2):\n # exchange pids\n p1_pid = os.getpid()\n q1.put(p1_pid)\n p2_pid = q2.get()\n\n # set up lock\n lock = lk.Lock(self.lock_path, debug=True)\n\n with 
lk.WriteTransaction(lock):\n            # p1 takes write lock and writes pid/host to file\n            barrier.wait() # ------------------------------------ 1\n\n            assert lock.pid == p1_pid\n            assert lock.host == self.host\n\n            # wait for p2 to verify contents of file\n            barrier.wait() # ---------------------------------------- 2\n\n            # wait for p2 to take a write lock\n            barrier.wait() # ---------------------------------------- 3\n\n            # verify pid/host info again\n            with lk.ReadTransaction(lock):\n                assert lock.old_pid == p1_pid\n                assert lock.old_host == self.host\n\n                assert lock.pid == p2_pid\n                assert lock.host == self.host\n\n            barrier.wait() # ---------------------------------------- 4\n\n    def p2(self, barrier, q1, q2):\n        # exchange pids\n        p2_pid = os.getpid()\n        p1_pid = q1.get()\n        q2.put(p2_pid)\n\n        # set up lock\n        lock = lk.Lock(self.lock_path, debug=True)\n\n        # p1 takes write lock and writes pid/host to file\n        barrier.wait() # ---------------------------------------- 1\n\n        # verify that p1 wrote information to lock file\n        with lk.ReadTransaction(lock):\n            assert lock.pid == p1_pid\n            assert lock.host == self.host\n\n        barrier.wait() # ---------------------------------------- 2\n\n        # take a write lock on the file and verify pid/host info\n        with lk.WriteTransaction(lock):\n            assert lock.old_pid == p1_pid\n            assert lock.old_host == self.host\n\n            assert lock.pid == p2_pid\n            assert lock.host == self.host\n\n            barrier.wait() # ------------------------------------ 3\n\n        # wait for p1 to verify pid/host info\n        barrier.wait() # ---------------------------------------- 4\n\n\ndef test_lock_debug_output(lock_path):\n    test_debug = LockDebugOutput(lock_path)\n    q1, q2 = Queue(), Queue()\n    local_multiproc_test(test_debug.p2, test_debug.p1, extra_args=(q1, q2))\n\n\ndef test_lock_with_no_parent_directory(tmpdir):\n    \"\"\"Make sure locks work even when their parent directory does not exist.\"\"\"\n    with tmpdir.as_cwd():\n        lock = lk.Lock(\"foo/bar/baz/lockfile\")\n        with lk.WriteTransaction(lock):\n            pass\n\n\ndef test_lock_in_current_directory(tmpdir):\n    \"\"\"Make sure locks work when created in the current directory.\"\"\"\n    with tmpdir.as_cwd():\n        # test we can create a lock in the current directory\n        lock = lk.Lock(\"lockfile\")\n        for i in range(10):\n            with lk.ReadTransaction(lock):\n                pass\n            with lk.WriteTransaction(lock):\n                pass\n\n        # and that we can do the same thing after it's already there\n        lock = lk.Lock(\"lockfile\")\n        for i in range(10):\n            with lk.ReadTransaction(lock):\n                pass\n            with lk.WriteTransaction(lock):\n                pass\n\n\ndef test_attempts_str():\n    assert lk._attempts_str(0, 0) == \"\"\n    assert lk._attempts_str(0.12, 1) == \"\"\n    assert lk._attempts_str(12.345, 2) == \" after 12.345s and 2 attempts\"\n\n\ndef test_lock_str():\n    lock = lk.Lock(\"lockfile\")\n    lockstr = str(lock)\n    assert \"lockfile[0:0]\" in lockstr\n    assert \"timeout=None\" in lockstr\n    assert \"#reads=0, #writes=0\" in lockstr\n\n\ndef test_downgrade_write_okay(tmpdir):\n    \"\"\"Test the lock write-to-read downgrade operation.\"\"\"\n    with tmpdir.as_cwd():\n        lock = lk.Lock(\"lockfile\")\n        lock.acquire_write()\n        lock.downgrade_write_to_read()\n        assert lock._reads == 1\n        assert lock._writes == 0\n        lock.release_read()\n\n\ndef test_downgrade_write_fails(tmpdir):\n    \"\"\"Test failing the lock write-to-read downgrade operation.\"\"\"\n    with tmpdir.as_cwd():\n        lock = lk.Lock(\"lockfile\")\n        lock.acquire_read()\n        msg = \"Cannot downgrade lock from write to read on file: lockfile\"\n        with pytest.raises(lk.LockDowngradeError, match=msg):\n            
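# only a read lock is held at this point, so the downgrade must raise\n            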
lock.downgrade_write_to_read()\n lock.release_read()\n\n\n@pytest.mark.parametrize(\n \"err_num,err_msg\",\n [\n (errno.EACCES, \"Fake EACCES error\"),\n (errno.EAGAIN, \"Fake EAGAIN error\"),\n (errno.ENOENT, \"Fake ENOENT error\"),\n ],\n)\ndef test_poll_lock_exception(tmpdir, monkeypatch, err_num, err_msg):\n \"\"\"Test poll lock exception handling.\"\"\"\n\n def _lockf(fd, cmd, len, start, whence):\n raise IOError(err_num, err_msg)\n\n with tmpdir.as_cwd():\n lockfile = \"lockfile\"\n lock = lk.Lock(lockfile)\n lock.acquire_read()\n\n monkeypatch.setattr(fcntl, \"lockf\", _lockf)\n\n if err_num in [errno.EAGAIN, errno.EACCES]:\n assert not lock._poll_lock(fcntl.LOCK_EX)\n else:\n with pytest.raises(IOError, match=err_msg):\n lock._poll_lock(fcntl.LOCK_EX)\n\n monkeypatch.undo()\n lock.release_read()\n\n\ndef test_upgrade_read_okay(tmpdir):\n \"\"\"Test the lock read-to-write upgrade operation.\"\"\"\n with tmpdir.as_cwd():\n lock = lk.Lock(\"lockfile\")\n lock.acquire_read()\n lock.upgrade_read_to_write()\n assert lock._reads == 0\n assert lock._writes == 1\n lock.release_write()\n\n\ndef test_upgrade_read_fails(tmpdir):\n \"\"\"Test failing the lock read-to-write upgrade operation.\"\"\"\n with tmpdir.as_cwd():\n lock = lk.Lock(\"lockfile\")\n lock.acquire_write()\n msg = \"Cannot upgrade lock from read to write on file: lockfile\"\n with pytest.raises(lk.LockUpgradeError, match=msg):\n lock.upgrade_read_to_write()\n lock.release_write()\n", "repo_name": "spack/spack", "sub_path": "lib/spack/spack/test/llnl/util/lock.py", "file_name": "lock.py", "file_ext": "py", "file_size_in_byte": 41908, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3712, "dataset": "github-code", "pt": "73", "api": [{"api_name": "sys.platform", "line_number": 21, "usage_type": "attribute"}, {"api_name": "pytest.mark.not_on_windows", "line_number": 24, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 24, "usage_type": "attribute"}, {"api_name": "mpi4py.MPI.COMM_WORLD", "line_number": 46, "usage_type": "attribute"}, {"api_name": "mpi4py.MPI", "line_number": 46, "usage_type": "name"}, {"api_name": "tempfile.gettempdir", "line_number": 62, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 63, "usage_type": "call"}, {"api_name": "os.path", "line_number": 63, "usage_type": "attribute"}, {"api_name": "getpass.getuser", "line_number": 63, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 64, "usage_type": "call"}, {"api_name": "os.path", "line_number": 64, "usage_type": "attribute"}, {"api_name": "getpass.getuser", "line_number": 64, "usage_type": "call"}, {"api_name": "sys.platform", "line_number": 85, "usage_type": "attribute"}, {"api_name": "os.path.isdir", "line_number": 86, "usage_type": "call"}, {"api_name": "os.path", "line_number": 86, "usage_type": "attribute"}, {"api_name": "stat.S_IREAD", "line_number": 88, "usage_type": "attribute"}, {"api_name": "os.chmod", "line_number": 89, "usage_type": "call"}, {"api_name": "sys.platform", "line_number": 94, "usage_type": "attribute"}, {"api_name": "os.path.isdir", "line_number": 95, "usage_type": "call"}, {"api_name": "os.path", "line_number": 95, "usage_type": "attribute"}, {"api_name": "stat.S_IWRITE", "line_number": 97, "usage_type": "attribute"}, {"api_name": "os.chmod", "line_number": 98, "usage_type": "call"}, {"api_name": "os.stat", "line_number": 103, "usage_type": "call"}, {"api_name": "os.chmod", "line_number": 109, "usage_type": "call"}, {"api_name": "contextlib.contextmanager", 
"line_number": 101, "usage_type": "name"}, {"api_name": "pytest.fixture", "line_number": 112, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 124, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 124, "usage_type": "call"}, {"api_name": "os.path", "line_number": 124, "usage_type": "attribute"}, {"api_name": "os.access", "line_number": 124, "usage_type": "call"}, {"api_name": "os.W_OK", "line_number": 124, "usage_type": "attribute"}, {"api_name": "pytest.skip", "line_number": 129, "usage_type": "call"}, {"api_name": "tempfile.gettempdir", "line_number": 130, "usage_type": "call"}, {"api_name": "pytest.skip", "line_number": 132, "usage_type": "call"}, {"api_name": "tempfile.mkdtemp", "line_number": 136, "usage_type": "call"}, {"api_name": "shutil.rmtree", "line_number": 151, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 121, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 160, "usage_type": "call"}, {"api_name": "os.path", "line_number": 160, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 166, "usage_type": "call"}, {"api_name": "os.path", "line_number": 166, "usage_type": "attribute"}, {"api_name": "os.unlink", "line_number": 168, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 154, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 174, "usage_type": "call"}, {"api_name": "os.path", "line_number": 174, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 178, "usage_type": "call"}, {"api_name": "os.path", "line_number": 178, "usage_type": "attribute"}, {"api_name": "os.unlink", "line_number": 180, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 171, "usage_type": "attribute"}, {"api_name": "llnl.util.lock.Lock._poll_interval_generator", "line_number": 184, "usage_type": "call"}, {"api_name": "llnl.util.lock.Lock", "line_number": 184, "usage_type": "attribute"}, {"api_name": "llnl.util.lock", "line_number": 184, "usage_type": "name"}, {"api_name": "llnl.util.multiproc.Barrier", "line_number": 191, "usage_type": "call"}, {"api_name": "llnl.util.multiproc", "line_number": 191, "usage_type": "name"}, {"api_name": "multiprocessing.Process", "line_number": 194, "usage_type": "call"}, {"api_name": "pytest.skip", "line_number": 218, "usage_type": "call"}, {"api_name": "traceback.print_exc", "line_number": 239, "usage_type": "call"}, {"api_name": "llnl.util.lock.Lock", "line_number": 266, "usage_type": "call"}, {"api_name": "llnl.util.lock", "line_number": 266, "usage_type": "name"}, {"api_name": "llnl.util.lock.Lock", "line_number": 283, "usage_type": "call"}, {"api_name": "llnl.util.lock", "line_number": 283, "usage_type": "name"}, {"api_name": "llnl.util.lock.Lock", "line_number": 300, "usage_type": "call"}, {"api_name": "llnl.util.lock", "line_number": 300, "usage_type": "name"}, {"api_name": "pytest.raises", "line_number": 302, "usage_type": "call"}, {"api_name": "llnl.util.lock.LockTimeoutError", "line_number": 302, "usage_type": "attribute"}, {"api_name": "llnl.util.lock", "line_number": 302, "usage_type": "name"}, {"api_name": "llnl.util.lock.Lock", "line_number": 318, "usage_type": "call"}, {"api_name": "llnl.util.lock", "line_number": 318, "usage_type": "name"}, {"api_name": "pytest.raises", "line_number": 320, "usage_type": "call"}, {"api_name": "llnl.util.lock.LockTimeoutError", "line_number": 320, "usage_type": "attribute"}, {"api_name": "llnl.util.lock", "line_number": 320, "usage_type": "name"}, 
{"api_name": "llnl.util.filesystem.touch", "line_number": 549, "usage_type": "call"}, {"api_name": "llnl.util.lock.Lock", "line_number": 551, "usage_type": "call"}, {"api_name": "llnl.util.lock", "line_number": 551, "usage_type": "name"}, {"api_name": "llnl.util.lock.ReadTransaction", "line_number": 553, "usage_type": "call"}, {"api_name": "llnl.util.lock", "line_number": 553, "usage_type": "name"}, {"api_name": "pytest.raises", "line_number": 556, "usage_type": "call"}, {"api_name": "llnl.util.lock.LockROFileError", "line_number": 556, "usage_type": "attribute"}, {"api_name": "llnl.util.lock", "line_number": 556, "usage_type": "name"}, {"api_name": "llnl.util.lock.WriteTransaction", "line_number": 557, "usage_type": "call"}, {"api_name": "llnl.util.lock", "line_number": 557, "usage_type": "name"}, {"api_name": "pytest.mark.skipif", "line_number": 546, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 546, "usage_type": "attribute"}, {"api_name": "llnl.util.filesystem.getuid", "line_number": 546, "usage_type": "call"}, {"api_name": "llnl.util.filesystem.touch", "line_number": 563, "usage_type": "call"}, {"api_name": "llnl.util.lock.Lock", "line_number": 565, "usage_type": "call"}, {"api_name": "llnl.util.lock", "line_number": 565, "usage_type": "name"}, {"api_name": "llnl.util.lock.ReadTransaction", "line_number": 567, "usage_type": "call"}, {"api_name": "llnl.util.lock", "line_number": 567, "usage_type": "name"}, {"api_name": "llnl.util.lock.WriteTransaction", "line_number": 570, "usage_type": "call"}, {"api_name": "llnl.util.lock", "line_number": 570, "usage_type": "name"}, {"api_name": "llnl.util.lock.Lock", "line_number": 578, "usage_type": "call"}, {"api_name": "llnl.util.lock", "line_number": 578, "usage_type": "name"}, {"api_name": "pytest.raises", "line_number": 580, "usage_type": "call"}, {"api_name": "llnl.util.lock.CantCreateLockError", "line_number": 580, "usage_type": "attribute"}, {"api_name": "llnl.util.lock", "line_number": 580, "usage_type": "name"}, {"api_name": "llnl.util.lock.ReadTransaction", "line_number": 581, "usage_type": "call"}, {"api_name": "llnl.util.lock", "line_number": 581, "usage_type": "name"}, {"api_name": "pytest.raises", "line_number": 584, "usage_type": "call"}, {"api_name": "llnl.util.lock.CantCreateLockError", "line_number": 584, "usage_type": "attribute"}, {"api_name": "llnl.util.lock", "line_number": 584, "usage_type": "name"}, {"api_name": "llnl.util.lock.WriteTransaction", "line_number": 585, "usage_type": "call"}, {"api_name": "llnl.util.lock", "line_number": 585, "usage_type": "name"}, {"api_name": "pytest.mark.skipif", "line_number": 574, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 574, "usage_type": "attribute"}, {"api_name": "sys.platform", "line_number": 574, "usage_type": "attribute"}, {"api_name": "llnl.util.filesystem.getuid", "line_number": 574, "usage_type": "call"}, {"api_name": "llnl.util.filesystem.touch", "line_number": 599, "usage_type": "call"}, {"api_name": "llnl.util.lock.Lock", "line_number": 601, "usage_type": "call"}, {"api_name": "llnl.util.lock", "line_number": 601, "usage_type": "name"}, {"api_name": "llnl.util.filesystem.touch", "line_number": 629, "usage_type": "call"}, {"api_name": "llnl.util.lock.Lock", "line_number": 633, "usage_type": "call"}, {"api_name": "llnl.util.lock", "line_number": 633, "usage_type": "name"}, {"api_name": "pytest.raises", "line_number": 643, "usage_type": "call"}, {"api_name": "llnl.util.lock.LockROFileError", "line_number": 643, "usage_type": 
"attribute"}, {"api_name": "llnl.util.lock", "line_number": 643, "usage_type": "name"}, {"api_name": "llnl.util.lock.FILE_TRACKER.release_by_stat", "line_number": 647, "usage_type": "call"}, {"api_name": "llnl.util.lock.FILE_TRACKER", "line_number": 647, "usage_type": "attribute"}, {"api_name": "llnl.util.lock", "line_number": 647, "usage_type": "name"}, {"api_name": "os.stat", "line_number": 647, "usage_type": "call"}, {"api_name": "llnl.util.lock.Lock", "line_number": 655, "usage_type": "call"}, {"api_name": "llnl.util.lock", "line_number": 655, "usage_type": "name"}, {"api_name": "pytest.raises", "line_number": 663, "usage_type": "call"}, {"api_name": "llnl.util.lock.LockTimeoutError", "line_number": 663, "usage_type": "attribute"}, {"api_name": "llnl.util.lock", "line_number": 663, "usage_type": "name"}, {"api_name": "pytest.raises", "line_number": 672, "usage_type": "call"}, {"api_name": "llnl.util.lock.LockTimeoutError", "line_number": 672, "usage_type": "attribute"}, {"api_name": "llnl.util.lock", "line_number": 672, "usage_type": "name"}, {"api_name": "pytest.raises", "line_number": 674, "usage_type": "call"}, {"api_name": "llnl.util.lock.LockTimeoutError", "line_number": 674, "usage_type": "attribute"}, {"api_name": "llnl.util.lock", "line_number": 674, "usage_type": "name"}, {"api_name": "pytest.raises", "line_number": 684, "usage_type": "call"}, {"api_name": "llnl.util.lock.LockTimeoutError", "line_number": 684, "usage_type": "attribute"}, {"api_name": "llnl.util.lock", "line_number": 684, "usage_type": "name"}, {"api_name": "pytest.raises", "line_number": 686, "usage_type": "call"}, {"api_name": "llnl.util.lock.LockTimeoutError", "line_number": 686, "usage_type": "attribute"}, {"api_name": "llnl.util.lock", "line_number": 686, "usage_type": "name"}, {"api_name": "llnl.util.lock.Lock", "line_number": 696, "usage_type": "call"}, {"api_name": "llnl.util.lock", "line_number": 696, "usage_type": "name"}, {"api_name": "pytest.raises", "line_number": 700, "usage_type": "call"}, {"api_name": "llnl.util.lock.LockTimeoutError", "line_number": 700, "usage_type": "attribute"}, {"api_name": "llnl.util.lock", "line_number": 700, "usage_type": "name"}, {"api_name": "pytest.raises", "line_number": 702, "usage_type": "call"}, {"api_name": "llnl.util.lock.LockTimeoutError", "line_number": 702, "usage_type": "attribute"}, {"api_name": "llnl.util.lock", "line_number": 702, "usage_type": "name"}, {"api_name": "pytest.raises", "line_number": 724, "usage_type": "call"}, {"api_name": "llnl.util.lock.LockTimeoutError", "line_number": 724, "usage_type": "attribute"}, {"api_name": "llnl.util.lock", "line_number": 724, "usage_type": "name"}, {"api_name": "pytest.raises", "line_number": 726, "usage_type": "call"}, {"api_name": "llnl.util.lock.LockTimeoutError", "line_number": 726, "usage_type": "attribute"}, {"api_name": "llnl.util.lock", "line_number": 726, "usage_type": "name"}, {"api_name": "llnl.util.lock.Lock", "line_number": 736, "usage_type": "call"}, {"api_name": "llnl.util.lock", "line_number": 736, "usage_type": "name"}, {"api_name": "pytest.raises", "line_number": 740, "usage_type": "call"}, {"api_name": "llnl.util.lock.LockTimeoutError", "line_number": 740, "usage_type": "attribute"}, {"api_name": "llnl.util.lock", "line_number": 740, "usage_type": "name"}, {"api_name": "pytest.raises", "line_number": 742, "usage_type": "call"}, {"api_name": "llnl.util.lock.LockTimeoutError", "line_number": 742, "usage_type": "attribute"}, {"api_name": "llnl.util.lock", "line_number": 742, "usage_type": 
"name"}, {"api_name": "pytest.raises", "line_number": 754, "usage_type": "call"}, {"api_name": "llnl.util.lock.LockTimeoutError", "line_number": 754, "usage_type": "attribute"}, {"api_name": "llnl.util.lock", "line_number": 754, "usage_type": "name"}, {"api_name": "pytest.raises", "line_number": 756, "usage_type": "call"}, {"api_name": "llnl.util.lock.LockTimeoutError", "line_number": 756, "usage_type": "attribute"}, {"api_name": "llnl.util.lock", "line_number": 756, "usage_type": "name"}, {"api_name": "llnl.util.lock.Lock", "line_number": 785, "usage_type": "attribute"}, {"api_name": "llnl.util.lock", "line_number": 785, "usage_type": "name"}, {"api_name": "collections.defaultdict", "line_number": 855, "usage_type": "call"}, {"api_name": "pytest.mark.parametrize", "line_number": 823, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 823, "usage_type": "attribute"}, {"api_name": "llnl.util.lock.ReadTransaction", "line_number": 824, "usage_type": "attribute"}, {"api_name": "llnl.util.lock", "line_number": 824, "usage_type": "name"}, {"api_name": "llnl.util.lock.WriteTransaction", "line_number": 824, "usage_type": "attribute"}, {"api_name": "collections.defaultdict", "line_number": 901, "usage_type": "call"}, {"api_name": "pytest.raises", "line_number": 904, "usage_type": "call"}, {"api_name": "pytest.mark.parametrize", "line_number": 869, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 869, "usage_type": "attribute"}, {"api_name": "llnl.util.lock.ReadTransaction", "line_number": 870, "usage_type": "attribute"}, {"api_name": "llnl.util.lock", "line_number": 870, "usage_type": "name"}, {"api_name": "llnl.util.lock.WriteTransaction", "line_number": 870, "usage_type": "attribute"}, {"api_name": "collections.defaultdict", "line_number": 963, "usage_type": "call"}, {"api_name": "pytest.raises", "line_number": 990, "usage_type": "call"}, {"api_name": "pytest.raises", "line_number": 1007, "usage_type": "call"}, {"api_name": "pytest.mark.parametrize", "line_number": 925, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 925, "usage_type": "attribute"}, {"api_name": "llnl.util.lock.ReadTransaction", "line_number": 926, "usage_type": "attribute"}, {"api_name": "llnl.util.lock", "line_number": 926, "usage_type": "name"}, {"api_name": "llnl.util.lock.WriteTransaction", "line_number": 926, "usage_type": "attribute"}, {"api_name": "collections.defaultdict", "line_number": 1046, "usage_type": "call"}, {"api_name": "llnl.util.lock.WriteTransaction", "line_number": 1050, "usage_type": "call"}, {"api_name": "llnl.util.lock", "line_number": 1050, "usage_type": "name"}, {"api_name": "llnl.util.lock.WriteTransaction", "line_number": 1052, "usage_type": "call"}, {"api_name": "llnl.util.lock", "line_number": 1052, "usage_type": "name"}, {"api_name": "llnl.util.lock.ReadTransaction", "line_number": 1059, "usage_type": "call"}, {"api_name": "llnl.util.lock", "line_number": 1059, "usage_type": "name"}, {"api_name": "llnl.util.lock.WriteTransaction", "line_number": 1061, "usage_type": "call"}, {"api_name": "llnl.util.lock", "line_number": 1061, "usage_type": "name"}, {"api_name": "llnl.util.lock.WriteTransaction", "line_number": 1067, "usage_type": "call"}, {"api_name": "llnl.util.lock", "line_number": 1067, "usage_type": "name"}, {"api_name": "llnl.util.lock.ReadTransaction", "line_number": 1069, "usage_type": "call"}, {"api_name": "llnl.util.lock", "line_number": 1069, "usage_type": "name"}, {"api_name": "llnl.util.lock.WriteTransaction", "line_number": 1071, 
"usage_type": "call"}, {"api_name": "llnl.util.lock", "line_number": 1071, "usage_type": "name"}, {"api_name": "llnl.util.lock.ReadTransaction", "line_number": 1079, "usage_type": "call"}, {"api_name": "llnl.util.lock", "line_number": 1079, "usage_type": "name"}, {"api_name": "llnl.util.lock.WriteTransaction", "line_number": 1080, "usage_type": "call"}, {"api_name": "llnl.util.lock", "line_number": 1080, "usage_type": "name"}, {"api_name": "llnl.util.lock.ReadTransaction", "line_number": 1082, "usage_type": "call"}, {"api_name": "llnl.util.lock", "line_number": 1082, "usage_type": "name"}, {"api_name": "llnl.util.lock.WriteTransaction", "line_number": 1084, "usage_type": "call"}, {"api_name": "llnl.util.lock", "line_number": 1084, "usage_type": "name"}, {"api_name": "collections.defaultdict", "line_number": 1097, "usage_type": "call"}, {"api_name": "llnl.util.lock.ReadTransaction", "line_number": 1103, "usage_type": "call"}, {"api_name": "llnl.util.lock", "line_number": 1103, "usage_type": "name"}, {"api_name": "llnl.util.lock.ReadTransaction", "line_number": 1105, "usage_type": "call"}, {"api_name": "llnl.util.lock", "line_number": 1105, "usage_type": "name"}, {"api_name": "llnl.util.lock.WriteTransaction", "line_number": 1111, "usage_type": "call"}, {"api_name": "llnl.util.lock", "line_number": 1111, "usage_type": "name"}, {"api_name": "llnl.util.lock.WriteTransaction", "line_number": 1113, "usage_type": "call"}, {"api_name": "llnl.util.lock", "line_number": 1113, "usage_type": "name"}, {"api_name": "llnl.util.lock.ReadTransaction", "line_number": 1119, "usage_type": "call"}, {"api_name": "llnl.util.lock", "line_number": 1119, "usage_type": "name"}, {"api_name": "llnl.util.lock.WriteTransaction", "line_number": 1121, "usage_type": "call"}, {"api_name": "llnl.util.lock", "line_number": 1121, "usage_type": "name"}, {"api_name": "llnl.util.lock.WriteTransaction", "line_number": 1127, "usage_type": "call"}, {"api_name": "llnl.util.lock", "line_number": 1127, "usage_type": "name"}, {"api_name": "llnl.util.lock.ReadTransaction", "line_number": 1129, "usage_type": "call"}, {"api_name": "llnl.util.lock", "line_number": 1129, "usage_type": "name"}, {"api_name": "llnl.util.lock.WriteTransaction", "line_number": 1131, "usage_type": "call"}, {"api_name": "llnl.util.lock", "line_number": 1131, "usage_type": "name"}, {"api_name": "llnl.util.lock.ReadTransaction", "line_number": 1137, "usage_type": "call"}, {"api_name": "llnl.util.lock", "line_number": 1137, "usage_type": "name"}, {"api_name": "llnl.util.lock.WriteTransaction", "line_number": 1139, "usage_type": "call"}, {"api_name": "llnl.util.lock", "line_number": 1139, "usage_type": "name"}, {"api_name": "llnl.util.lock.ReadTransaction", "line_number": 1141, "usage_type": "call"}, {"api_name": "llnl.util.lock", "line_number": 1141, "usage_type": "name"}, {"api_name": "llnl.util.lock.WriteTransaction", "line_number": 1143, "usage_type": "call"}, {"api_name": "llnl.util.lock", "line_number": 1143, "usage_type": "name"}, {"api_name": "socket.gethostname", "line_number": 1150, "usage_type": "call"}, {"api_name": "os.getpid", "line_number": 1154, "usage_type": "call"}, {"api_name": "llnl.util.lock.Lock", "line_number": 1159, "usage_type": "call"}, {"api_name": "llnl.util.lock", "line_number": 1159, "usage_type": "name"}, {"api_name": "llnl.util.lock.WriteTransaction", "line_number": 1161, "usage_type": "call"}, {"api_name": "llnl.util.lock", "line_number": 1161, "usage_type": "name"}, {"api_name": "llnl.util.lock.ReadTransaction", "line_number": 1175, 
"usage_type": "call"}, {"api_name": "llnl.util.lock", "line_number": 1175, "usage_type": "name"}, {"api_name": "os.getpid", "line_number": 1186, "usage_type": "call"}, {"api_name": "llnl.util.lock.Lock", "line_number": 1191, "usage_type": "call"}, {"api_name": "llnl.util.lock", "line_number": 1191, "usage_type": "name"}, {"api_name": "llnl.util.lock.ReadTransaction", "line_number": 1197, "usage_type": "call"}, {"api_name": "llnl.util.lock", "line_number": 1197, "usage_type": "name"}, {"api_name": "llnl.util.lock.WriteTransaction", "line_number": 1204, "usage_type": "call"}, {"api_name": "llnl.util.lock", "line_number": 1204, "usage_type": "name"}, {"api_name": "multiprocessing.Queue", "line_number": 1219, "usage_type": "call"}, {"api_name": "llnl.util.lock.Lock", "line_number": 1226, "usage_type": "call"}, {"api_name": "llnl.util.lock", "line_number": 1226, "usage_type": "name"}, {"api_name": "llnl.util.lock.WriteTransaction", "line_number": 1227, "usage_type": "call"}, {"api_name": "llnl.util.lock", "line_number": 1227, "usage_type": "name"}, {"api_name": "llnl.util.lock.Lock", "line_number": 1235, "usage_type": "call"}, {"api_name": "llnl.util.lock", "line_number": 1235, "usage_type": "name"}, {"api_name": "llnl.util.lock.ReadTransaction", "line_number": 1237, "usage_type": "call"}, {"api_name": "llnl.util.lock", "line_number": 1237, "usage_type": "name"}, {"api_name": "llnl.util.lock.WriteTransaction", "line_number": 1239, "usage_type": "call"}, {"api_name": "llnl.util.lock", "line_number": 1239, "usage_type": "name"}, {"api_name": "llnl.util.lock.Lock", "line_number": 1243, "usage_type": "call"}, {"api_name": "llnl.util.lock", "line_number": 1243, "usage_type": "name"}, {"api_name": "llnl.util.lock.ReadTransaction", "line_number": 1245, "usage_type": "call"}, {"api_name": "llnl.util.lock", "line_number": 1245, "usage_type": "name"}, {"api_name": "llnl.util.lock.WriteTransaction", "line_number": 1247, "usage_type": "call"}, {"api_name": "llnl.util.lock", "line_number": 1247, "usage_type": "name"}, {"api_name": "llnl.util.lock._attempts_str", "line_number": 1252, "usage_type": "call"}, {"api_name": "llnl.util.lock", "line_number": 1252, "usage_type": "name"}, {"api_name": "llnl.util.lock._attempts_str", "line_number": 1253, "usage_type": "call"}, {"api_name": "llnl.util.lock", "line_number": 1253, "usage_type": "name"}, {"api_name": "llnl.util.lock._attempts_str", "line_number": 1254, "usage_type": "call"}, {"api_name": "llnl.util.lock", "line_number": 1254, "usage_type": "name"}, {"api_name": "llnl.util.lock.Lock", "line_number": 1258, "usage_type": "call"}, {"api_name": "llnl.util.lock", "line_number": 1258, "usage_type": "name"}, {"api_name": "llnl.util.lock.Lock", "line_number": 1268, "usage_type": "call"}, {"api_name": "llnl.util.lock", "line_number": 1268, "usage_type": "name"}, {"api_name": "llnl.util.lock.Lock", "line_number": 1279, "usage_type": "call"}, {"api_name": "llnl.util.lock", "line_number": 1279, "usage_type": "name"}, {"api_name": "pytest.raises", "line_number": 1282, "usage_type": "call"}, {"api_name": "llnl.util.lock.LockDowngradeError", "line_number": 1282, "usage_type": "attribute"}, {"api_name": "llnl.util.lock", "line_number": 1282, "usage_type": "name"}, {"api_name": "llnl.util.lock.Lock", "line_number": 1303, "usage_type": "call"}, {"api_name": "llnl.util.lock", "line_number": 1303, "usage_type": "name"}, {"api_name": "errno.EAGAIN", "line_number": 1308, "usage_type": "attribute"}, {"api_name": "errno.EACCES", "line_number": 1308, "usage_type": "attribute"}, 
{"api_name": "fcntl.LOCK_EX", "line_number": 1309, "usage_type": "attribute"}, {"api_name": "pytest.raises", "line_number": 1311, "usage_type": "call"}, {"api_name": "fcntl.LOCK_EX", "line_number": 1312, "usage_type": "attribute"}, {"api_name": "pytest.mark.parametrize", "line_number": 1287, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 1287, "usage_type": "attribute"}, {"api_name": "errno.EACCES", "line_number": 1290, "usage_type": "attribute"}, {"api_name": "errno.EAGAIN", "line_number": 1291, "usage_type": "attribute"}, {"api_name": "errno.ENOENT", "line_number": 1292, "usage_type": "attribute"}, {"api_name": "llnl.util.lock.Lock", "line_number": 1321, "usage_type": "call"}, {"api_name": "llnl.util.lock", "line_number": 1321, "usage_type": "name"}, {"api_name": "llnl.util.lock.Lock", "line_number": 1332, "usage_type": "call"}, {"api_name": "llnl.util.lock", "line_number": 1332, "usage_type": "name"}, {"api_name": "pytest.raises", "line_number": 1335, "usage_type": "call"}, {"api_name": "llnl.util.lock.LockUpgradeError", "line_number": 1335, "usage_type": "attribute"}, {"api_name": "llnl.util.lock", "line_number": 1335, "usage_type": "name"}]}
+{"seq_id": "5463636639", "text": "import argparse\r\nimport numpy as np\r\nimport torch\r\nfrom torch.utils.data import Dataset, DataLoader\r\nimport torch.optim as op\r\nimport torch.nn as nn\r\nimport pandas as pd\r\nfrom matplotlib import pyplot as plt\r\nimport math\r\nfrom torchvision import datasets, transforms\r\nimport random\r\n\r\n\r\n\r\nclass AE(nn.Module):\r\n def __init__(self, hidden_layer_size, n_inputs):\r\n super(AE, self).__init__()\r\n self.encoder_LSTM = nn.LSTM(n_inputs, hidden_layer_size, batch_first=True)\r\n self.decoder_LSTM = nn.LSTM(hidden_layer_size, hidden_layer_size, batch_first=True)\r\n self.hidden_layer_size = hidden_layer_size\r\n self.n_inputs = n_inputs\r\n self.func = nn.Linear(hidden_layer_size, n_inputs)\r\n self.indicator = nn.Linear(hidden_layer_size, 10)\r\n\r\n\r\n def forward(self, x_t):\r\n x, (z, y) = self.encoder_LSTM(x_t)\r\n z = z.view(-1, 1, self.hidden_layer_size).repeat(1, x_t.size(1) , 1)\r\n h_temp , s = self.decoder_LSTM(z)\r\n return self.func(h_temp), self.indicator(h_temp)\r\n\r\nclass LSTM_AE_MNIST(Dataset):\r\n def __init__(self, k, *, flatten=False):\r\n self.data = []\r\n self.tags = []\r\n if flatten:\r\n for (i,j) in k:\r\n self.data.extend([i.reshape(-1,1)])\r\n self.tags.extend([j])\r\n else:\r\n for (i,j) in k:\r\n self.data.extend([i.squeeze(0)])\r\n self.tags.extend([j])\r\n\r\n\r\n def __len__(self):\r\n return len(self.data)\r\n\r\n def __getitem__(self, item):\r\n return self.data[item], self.tags[item]\r\n\r\n\r\ndef training(epochs, optim, model,clip, data, values):\r\n\r\n vals = []\r\n training =[]\r\n training_success = []\r\n validation_success = []\r\n adder = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\r\n loss_func = nn.MSELoss()\r\n cross_entry_point = nn.CrossEntropyLoss()\r\n # print(\"he\")\r\n for e in range(1, epochs):\r\n total_acc = 0\r\n total_loss =0\r\n total_all = 0\r\n\r\n for datum, tags in data:\r\n # print(\"ha\")\r\n optim.zero_grad()\r\n tags = tags.to(adder)\r\n datum = datum.to(adder)\r\n outputs, new_input = model(datum)\r\n loss = loss_func(outputs, datum)\r\n new_loss = cross_entry_point(new_input.reshape(len(datum), -1), tags)\r\n loss = loss + new_loss\r\n loss.backward()\r\n nn.utils.clip_grad_norm_(model.parameters(), clip)\r\n optim.step()\r\n total_loss = total_loss + loss.item()\r\n with torch.no_grad():\r\n _, new_acc = torch.max(new_input.reshape(len(datum),-1) , 1)\r\n total_acc = total_acc + (new_acc == tags).sum().item()\r\n total_all = total_all + len(datum)\r\n # print(total_acc)\r\n # print(total_all)\r\n\r\n with torch.no_grad():\r\n total_loss_2 = 0\r\n total_acc_2 = 0\r\n total_all_2 = 0\r\n for datum, tags in values:\r\n tags = tags.to(adder)\r\n datum = datum.to(adder)\r\n outputs, new_input = model(datum)\r\n loss = loss_func(outputs, datum)\r\n new_loss = cross_entry_point(new_input.view(len(datum), -1), tags)\r\n loss = loss + new_loss\r\n total_loss_2 = total_loss + loss.item()\r\n _, new_acc = torch.max(new_input.view(len(datum), -1), 1)\r\n total_acc_2 = total_acc_2 + (new_acc == tags).sum().item()\r\n total_all_2 = total_all_2 + len(datum)\r\n\r\n new_item = total_loss/len(data)\r\n new_item_3 = total_acc / total_all\r\n training.append(new_item)\r\n training_success.append(new_item_3)\r\n\r\n new_item2 = total_loss_2/len(values)\r\n new_item_4 = total_acc_2 / total_all_2\r\n vals.append(new_item2)\r\n validation_success.append((new_item_4))\r\n\r\n print(\"~~~~~~~~~~~~~~~~~\")\r\n print(f\" Epoch {e}\\n train loss: {new_item:.3f}\\n val 
loss: {new_item2:.3f}\\n\")\r\n print(f\"train accuracy: {new_item_3:.3f}\\n validation acc: {new_item_4:.3f}\\n\")\r\n\r\n return training, vals, training_success, validation_success\r\n\r\n\r\n\r\nparser= argparse.ArgumentParser()\r\nparser.add_argument(\"--epochs\", type = int, default = 30)\r\nparser.add_argument(\"--input_size\", type = int, choices=[1,28], default = 1)\r\nparser.add_argument(\"--optim\", choices =[\"sgd\", \"adam\"], default=\"adam\")\r\nparser.add_argument(\"--clip\", type=float, default=0.1)\r\nparser.add_argument(\"--batch_size\", type=int, default=64)\r\nparser.add_argument(\"--hidden_size\", type=int, default=256)\r\nparser.add_argument(\"--lr\", type=float, default=0.001)\r\n\r\narguments = parser.parse_args()\r\n\r\ntran = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])\r\nmnist_train = datasets.MNIST(\"./data\", train = True, download = True, transform =tran)\r\nmnist_test = list(datasets.MNIST(\"./data\", train = False, download = True, transform =tran))\r\n\r\nprint(f\"parameters: epochs:{arguments.epochs} input_size: {arguments.input_size}optimizer:{arguments.optim} \"\r\n f\"learning rate:{arguments.lr} clipping:{arguments.clip} batch_size:{arguments.batch_size} hidden_size:{arguments.hidden_size}\")\r\nif arguments.input_size == 1:\r\n data = DataLoader(LSTM_AE_MNIST(mnist_train, flatten=True), batch_size=arguments.batch_size, shuffle=True)\r\n values = DataLoader(LSTM_AE_MNIST(mnist_test[:5000], flatten=True), batch_size=arguments.batch_size,\r\n shuffle=False)\r\n check = DataLoader(LSTM_AE_MNIST(mnist_test[5000:], flatten=True), batch_size=arguments.batch_size,\r\n shuffle=False)\r\n\r\nelse:\r\n data = DataLoader(LSTM_AE_MNIST(mnist_train), batch_size=arguments.batch_size, shuffle=True)\r\n values = DataLoader(LSTM_AE_MNIST(mnist_test[:5000]), batch_size=arguments.batch_size,\r\n shuffle=False)\r\n check = DataLoader(LSTM_AE_MNIST(mnist_test[5000:]), batch_size=arguments.batch_size,\r\n shuffle=False)\r\n\r\nadder = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\r\n\r\nmodel = AE(arguments.hidden_size, arguments.input_size).to(adder)\r\n\r\nif arguments.optim == \"sgd\":\r\n optim = op.SGD(model.parameters(), lr=arguments.lr)\r\nelse:\r\n optim = op.Adam(model.parameters(), lr=arguments.lr)\r\n\r\ntraining , vals,training_success, validation_success = training(arguments.epochs, optim, model, arguments.clip, data, values)\r\n\r\ncheck_2 =0\r\ncheck_all =0\r\n\r\nwith torch.no_grad():\r\n model = AE(arguments.hidden_size, arguments.input_size)\r\n adder = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\r\n model = model.to(adder)\r\n for datum, tags in check:\r\n datum = datum.to(adder)\r\n tags = tags.to(adder)\r\n outputs, new_input = model(datum)\r\n _, new_acc = torch.max(new_input.reshape(len(datum), -1), 1)\r\n check_2 = check_2 + (new_acc == tags).sum().item()\r\n check_all = check_all + len(datum)\r\n acc = check_2 / check_all\r\n print(f\"acc : {acc}.3f\")\r\n\r\nplt.title(\"Success per epoch\")\r\nplt.ylabel(\"Accuracy\")\r\nplt.xlabel(\"Epochs\")\r\nplt.plot(validation_success, label=\"Valid\")\r\nplt.plot(training_success, label=\"Train\")\r\nplt.legend()\r\nplt.show()\r\n\r\nplt.title(\"Training Loss per epoch\")\r\nplt.ylabel(\"Loss\")\r\nplt.xlabel(\"Epochs\")\r\nplt.plot(vals, label=\"Valid\")\r\nplt.plot(training, label=\"Train\")\r\nplt.legend()\r\nplt.show()\r\n\r\n\r\n\r\ndatum = random.choice(next(iter(check))[0])\r\nfig, res = plt.subplots(1,2, 
constrained_layout=True)\r\nres[0].imshow(datum.numpy(), cmap=\"binary\")\r\nres[0].set_title(\"input\")\r\nres[0].axis(\"off\")\r\n\r\nwith torch.no_grad():\r\n out = model(datum.unsqueeze(0))[0][0]\r\n res[1].imshow(out.cpu().numpy(), cmap=\"binary\")\r\nres[1].axis(\"off\")\r\nres[1].set_title(\"output\")\r\nplt.show()\r\n\r\n", "repo_name": "Ido220694/LSTM-Auto-Encoder", "sub_path": "lstm_ae_mnist.py", "file_name": "lstm_ae_mnist.py", "file_ext": "py", "file_size_in_byte": 8024, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "73", "api": [{"api_name": "torch.nn.Module", "line_number": 15, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 15, "usage_type": "name"}, {"api_name": "torch.nn.LSTM", "line_number": 18, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 18, "usage_type": "name"}, {"api_name": "torch.nn.LSTM", "line_number": 19, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 19, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 22, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 22, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 23, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 23, "usage_type": "name"}, {"api_name": "torch.utils.data.Dataset", "line_number": 32, "usage_type": "name"}, {"api_name": "torch.device", "line_number": 59, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 59, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 59, "usage_type": "attribute"}, {"api_name": "torch.nn.MSELoss", "line_number": 60, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 60, "usage_type": "name"}, {"api_name": "torch.nn.CrossEntropyLoss", "line_number": 61, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 61, "usage_type": "name"}, {"api_name": "torch.nn.utils.clip_grad_norm_", "line_number": 78, "usage_type": "call"}, {"api_name": "torch.nn.utils", "line_number": 78, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 78, "usage_type": "name"}, {"api_name": "torch.no_grad", "line_number": 81, "usage_type": "call"}, {"api_name": "torch.max", "line_number": 82, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 88, "usage_type": "call"}, {"api_name": "torch.max", "line_number": 100, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 122, "usage_type": "call"}, {"api_name": "torchvision.transforms.Compose", "line_number": 133, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 133, "usage_type": "name"}, {"api_name": "torchvision.transforms.ToTensor", "line_number": 133, "usage_type": "call"}, {"api_name": "torchvision.transforms.Normalize", "line_number": 133, "usage_type": "call"}, {"api_name": "torchvision.datasets.MNIST", "line_number": 134, "usage_type": "call"}, {"api_name": "torchvision.datasets", "line_number": 134, "usage_type": "name"}, {"api_name": "torchvision.datasets.MNIST", "line_number": 135, "usage_type": "call"}, {"api_name": "torchvision.datasets", "line_number": 135, "usage_type": "name"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 140, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 141, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 143, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 147, "usage_type": "call"}, 
{"api_name": "torch.utils.data.DataLoader", "line_number": 148, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 150, "usage_type": "call"}, {"api_name": "torch.device", "line_number": 153, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 153, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 153, "usage_type": "attribute"}, {"api_name": "torch.optim.SGD", "line_number": 158, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 158, "usage_type": "name"}, {"api_name": "torch.optim.Adam", "line_number": 160, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 160, "usage_type": "name"}, {"api_name": "torch.no_grad", "line_number": 167, "usage_type": "call"}, {"api_name": "torch.device", "line_number": 169, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 169, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 169, "usage_type": "attribute"}, {"api_name": "torch.max", "line_number": 175, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.title", "line_number": 181, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 181, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 182, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 182, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 183, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 183, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 184, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 184, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 185, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 185, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 186, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 186, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 187, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 187, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 189, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 189, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 190, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 190, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 191, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 191, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 192, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 192, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 193, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 193, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 194, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 194, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 195, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 195, "usage_type": "name"}, {"api_name": "random.choice", "line_number": 199, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 200, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 200, "usage_type": "name"}, 
{"api_name": "torch.no_grad", "line_number": 205, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 210, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 210, "usage_type": "name"}]}
+{"seq_id": "30409010081", "text": "import logging\nimport os\nimport re\nimport shutil\nimport subprocess\n\nfrom abstract_publisher import AbstractPublisher\nfrom json_helpers import json_load, json_dump\nfrom package_version_rewriter import update_package_json_versions, rewrite_shrinkwrap_file\n\nclass NpmPublisher(AbstractPublisher):\n ''' Reads and publishes npm packages assuming an incrementing revision number rather than using\n full-fledged semver. Nuclide packages are developed as a consistent set, with 0.0.0 as the\n version that is always stored in source control, but with a 0.0.x scheme that is used for\n the versions published to npm.\n '''\n\n _version_regex = re.compile('^0\\.0\\.(\\d+)$')\n\n def __init__(self, config, npm, tmpdir, transpiler, boilerplate_files):\n self._config = config\n self._npm = npm\n self._tmpdir = os.path.join(tmpdir, 'npm')\n self._tmp_package = os.path.join(self._tmpdir, self.get_package_name())\n self._transpiler = transpiler\n self._boilerplate_files = boilerplate_files\n\n def get_package_name(self):\n return self._config.package_name\n\n def is_already_published(self, target_version):\n semver = '0.0.%s' % target_version\n return self._npm.is_published(self.get_package_name(), semver)\n\n def get_published_version(self):\n ''' Reads the `npm info` of the package, gets the current version (of the form 0.0.x)\n and then returns the incrementing version, integer x.\n '''\n logging.info('Attempting to determine version of %s in npm', self.get_package_name())\n\n # We often call this multiple times to check publication progress, so force non-memoization.\n semver = self._npm.info(self._config.package_directory, force=True).get('version', '')\n\n match = self._version_regex.match(semver)\n if match:\n version = int(match.group(1))\n logging.info('Version of %s is %d', self.get_package_name(), version)\n else:\n version = 0\n logging.warning('Version of %s is not available; defaulting to 0' %\n self.get_package_name())\n return version\n\n def is_published_version(self, target_version):\n return self.get_published_version() == target_version\n\n def prepublish(self, new_version, atom_semver):\n logging.info('Publishing %s to npm at version %s', self.get_package_name(), new_version)\n\n # Create temporary directory and copy package into it (without dependencies).\n package = self._config.package_directory\n logging.info('Copying %s to tmpdir', self.get_package_name())\n shutil.copytree(package, self._tmp_package, ignore=shutil.ignore_patterns('node_modules'))\n\n # Make sure that standard boilerplate files are included in the repo.\n for name, src in self._boilerplate_files.items():\n shutil.copyfile(\n src,\n os.path.join(self._tmp_package, name))\n\n # Load package.json and rewrite version number within it.\n package_file = os.path.join(self._tmp_package, 'package.json')\n package = json_load(package_file)\n package = update_package_json_versions(self.get_package_name(), package,\n self._config.nuclide_npm_package_names, new_version)\n\n # Delete \"_atomModuleCache\" field from package.json.\n # TODO (chenshen): delete following line once '_atomModuleCache' is not fake.\n if '_atomModuleCache' in package:\n del package['_atomModuleCache']\n\n # Specify the license if it is not already specified.\n if 'license' not in package:\n package['license'] = 'SEE LICENSE IN LICENSE'\n\n # Write the adjusted package file back to the temporary directory and publish it.\n json_dump(package, package_file)\n\n # Pre-transpile Babel files, as appropriate.\n 
self._transpiler.transpile_in_place(self.get_package_name(), self._tmp_package)\n\n rewrite_shrinkwrap_file(self._tmp_package,\n package, self._config.nuclide_npm_package_names, new_version)\n\n def publish(self, new_version, atom_semver):\n try:\n self._npm.publish(self._tmp_package)\n except subprocess.CalledProcessError:\n logging.error('FAILED to publish package %s at version %d; it may already be published',\n self.get_package_name(), new_version)\n", "repo_name": "zgao/as-nuclide", "sub_path": "scripts/lib/publishers/npm_publisher.py", "file_name": "npm_publisher.py", "file_ext": "py", "file_size_in_byte": 4422, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "73", "api": [{"api_name": "abstract_publisher.AbstractPublisher", "line_number": 11, "usage_type": "name"}, {"api_name": "re.compile", "line_number": 18, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 23, "usage_type": "call"}, {"api_name": "os.path", "line_number": 23, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 24, "usage_type": "call"}, {"api_name": "os.path", "line_number": 24, "usage_type": "attribute"}, {"api_name": "logging.info", "line_number": 39, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 47, "usage_type": "call"}, {"api_name": "logging.warning", "line_number": 50, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 58, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 62, "usage_type": "call"}, {"api_name": "shutil.copytree", "line_number": 63, "usage_type": "call"}, {"api_name": "shutil.ignore_patterns", "line_number": 63, "usage_type": "call"}, {"api_name": "shutil.copyfile", "line_number": 67, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 69, "usage_type": "call"}, {"api_name": "os.path", "line_number": 69, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 72, "usage_type": "call"}, {"api_name": "os.path", "line_number": 72, "usage_type": "attribute"}, {"api_name": "json_helpers.json_load", "line_number": 73, "usage_type": "call"}, {"api_name": "package_version_rewriter.update_package_json_versions", "line_number": 74, "usage_type": "call"}, {"api_name": "json_helpers.json_dump", "line_number": 87, "usage_type": "call"}, {"api_name": "package_version_rewriter.rewrite_shrinkwrap_file", "line_number": 92, "usage_type": "call"}, {"api_name": "subprocess.CalledProcessError", "line_number": 98, "usage_type": "attribute"}, {"api_name": "logging.error", "line_number": 99, "usage_type": "call"}]}
+{"seq_id": "15720959109", "text": "# -*- coding: utf-8 -*-\n\nimport os.path\nfrom ast import literal_eval\nfrom functools import partial\n\nfrom qgis.PyQt import uic\nfrom qgis.PyQt.QtWidgets import (\n QAbstractItemView,\n QAction,\n QFrame,\n QHeaderView,\n QListWidgetItem,\n QMessageBox,\n QTableWidgetItem,\n)\nfrom qgis.PyQt.QtGui import QColor, QIcon\nfrom qgis.PyQt.QtCore import QSize, Qt\nfrom qgis.core import QgsProject\nfrom qgis.gui import QgsMessageBar\nfrom qgis.utils import Qgis, iface\n\nfrom buildings.gui import bulk_load_changes\nfrom buildings.gui.error_dialog import ErrorDialog\nfrom buildings.gui.edit_dialog import EditDialog\nfrom buildings.gui.deletion_reason_dialog import DeletionReason\nfrom buildings.utilities import database as db\nfrom buildings.sql import buildings_bulk_load_select_statements as bulk_load_select\nfrom buildings.sql import buildings_select_statements as buildings_select\nfrom buildings.sql import general_select_statements as general_select\nfrom buildings.utilities import circle_tool\nfrom buildings.utilities.layers import LayerRegistry\nfrom buildings.utilities.multi_layer_selection import MultiLayerSelection\nfrom buildings.utilities.point_tool import PointTool\n\n__location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))\nFORM_CLASS, _ = uic.loadUiType(\n os.path.join(os.path.dirname(__file__), \"alter_building_relationship.ui\")\n)\n\n\nclass AlterRelationships(QFrame, FORM_CLASS):\n def __init__(self, dockwidget, current_dataset, parent=None):\n \"\"\"Constructor.\"\"\"\n\n # Attributes set in subsequent methods\n self.message_bar_edit = None\n self.message_bar_qa = None\n self.add_action = None\n self.edit_geom_action = None\n self.edit_attrs_action = None\n self.lyr_related_existing = None\n self.lyr_related_bulk_load = None\n self.lyr_matched_existing = None\n self.lyr_matched_bulk_load = None\n self.lyr_removed_existing = None\n self.lyr_added_bulk_load = None\n self.lyr_related_bulk_load_in_edit = None\n self.lyr_related_existing_in_edit = None\n self.lyr_matched_bulk_load_in_edit = None\n self.lyr_matched_existing_in_edit = None\n self.lyr_removed_existing_in_edit = None\n self.lyr_added_bulk_load_in_edit = None\n self.lyr_existing = None\n self.lyr_bulk_load = None\n self.lyr_facilities = None\n self.msgbox = None\n self.tool = None\n self.reason_text = None\n self.circle_tool = None\n self.polyline = None\n self.circle_action = None\n\n super(AlterRelationships, self).__init__(parent)\n self.setupUi(self)\n\n self.db = db\n self.db.connect()\n\n self.valid_building_uses = {\n None: \"None\",\n **{\n use_id: use\n for use_id, use in self.db.execute_return(\n \"SELECT * FROM buildings.use;\"\n ).fetchall()\n },\n }\n\n self.dockwidget = dockwidget\n self.layer_registry = LayerRegistry()\n self.current_dataset = current_dataset\n self.error_dialog = None\n self.autosave = False\n self.delete = False\n self.deletion_reason = None\n self.zoom = True\n self.attributes_changed = False\n\n self.frame_setup()\n self.layers_setup()\n self.edit_dialog = EditDialog(self)\n self.change_instance = None\n self.toolbar_setup()\n self.connect_signals()\n\n def frame_setup(self):\n\n self.message_bar_edit = QgsMessageBar()\n self.layout_msg_bar_edit.addWidget(self.message_bar_edit)\n self.message_bar_qa = QgsMessageBar()\n self.layout_msg_bar_qa.addWidget(self.message_bar_qa)\n\n self.btn_qa_not_removed.setIcon(\n QIcon(os.path.join(__location__, \"..\", \"icons\", \"match.png\"))\n )\n self.btn_next.setIcon(\n 
QIcon(os.path.join(__location__, \"..\", \"icons\", \"next.png\"))\n )\n self.btn_maptool.setIcon(\n QIcon(\n os.path.join(\n __location__, \"..\", \"icons\", \"multi_layer_selection_tool.png\"\n )\n )\n )\n\n self.cbox_use.insertItems(0, self.valid_building_uses.values())\n\n self.maptool_clicked()\n self.reset_buttons()\n self.btn_next.setEnabled(False)\n self.qa_button_set_enable(False)\n self.btn_qa_not_removed.setEnabled(False)\n self.populate_cmb_relationship()\n self.setup_message_box()\n\n def layers_setup(self):\n self.add_building_lyrs()\n self.repaint_view()\n self.clear_layer_filter()\n iface.setActiveLayer(self.lyr_bulk_load)\n\n def toolbar_setup(self):\n\n if \"Add Outline\" not in (\n action.text() for action in iface.building_toolbar.actions()\n ):\n image_dir = os.path.join(__location__, \"..\", \"icons\")\n icon_path = os.path.join(image_dir, \"plus.png\")\n icon = QIcon()\n icon.addFile(icon_path, QSize(8, 8))\n self.add_action = QAction(icon, \"Add Outline\", iface.building_toolbar)\n iface.registerMainWindowAction(self.add_action, \"Ctrl+1\")\n self.add_action.triggered.connect(self.canvas_add_outline)\n iface.building_toolbar.addAction(self.add_action)\n\n if \"Edit Geometry\" not in (\n action.text() for action in iface.building_toolbar.actions()\n ):\n image_dir = os.path.join(__location__, \"..\", \"icons\")\n icon_path = os.path.join(image_dir, \"edit_geometry.png\")\n icon = QIcon()\n icon.addFile(icon_path, QSize(8, 8))\n self.edit_geom_action = QAction(\n icon, \"Edit Geometry\", iface.building_toolbar\n )\n iface.registerMainWindowAction(self.edit_geom_action, \"Ctrl+2\")\n self.edit_geom_action.triggered.connect(self.canvas_edit_geometry)\n iface.building_toolbar.addAction(self.edit_geom_action)\n\n if \"Edit Attributes\" not in (\n action.text() for action in iface.building_toolbar.actions()\n ):\n image_dir = os.path.join(__location__, \"..\", \"icons\")\n icon_path = os.path.join(image_dir, \"edit_attributes.png\")\n icon = QIcon()\n icon.addFile(icon_path, QSize(8, 8))\n self.edit_attrs_action = QAction(\n icon, \"Edit Attributes\", iface.building_toolbar\n )\n iface.registerMainWindowAction(self.edit_attrs_action, \"Ctrl+3\")\n self.edit_attrs_action.triggered.connect(self.canvas_edit_attribute)\n iface.building_toolbar.addAction(self.edit_attrs_action)\n\n iface.building_toolbar.show()\n\n def connect_signals(self):\n\n self.dockwidget.closed.connect(self.on_dockwidget_closed)\n\n self.btn_qa_okay.clicked.connect(\n partial(self.btn_qa_status_clicked, \"Okay\", commit_status=True)\n )\n self.btn_qa_pending.clicked.connect(\n partial(self.btn_qa_status_clicked, \"Pending\", commit_status=True)\n )\n self.btn_qa_refer2supplier.clicked.connect(\n partial(self.btn_qa_status_clicked, \"Refer to Supplier\", commit_status=True)\n )\n self.btn_qa_not_checked.clicked.connect(\n partial(self.btn_qa_status_clicked, \"Not Checked\", commit_status=True)\n )\n self.btn_qa_not_removed.clicked.connect(\n partial(self.btn_qa_status_clicked, \"Not Removed\", commit_status=True)\n )\n self.btn_next.clicked.connect(self.zoom_to_next)\n self.btn_maptool.clicked.connect(self.maptool_clicked)\n self.btn_unlink.clicked.connect(\n partial(self.unlink_clicked, commit_status=True)\n )\n self.btn_matched.clicked.connect(\n partial(self.matched_clicked, commit_status=True)\n )\n self.btn_related.clicked.connect(\n partial(self.related_clicked, commit_status=True)\n )\n self.btn_delete.clicked.connect(\n partial(self.delete_clicked, commit_status=True)\n )\n 
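# buttons for copying, setting and deleting building attributes\n        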
self.btn_copy_from_existing.clicked.connect(\n self.on_click_btn_copy_from_existing\n )\n self.btn_set_attributes.clicked.connect(self.on_click_btn_set_attributes)\n self.btn_delete_attributes.clicked.connect(self.on_click_btn_delete_attributes)\n self.btn_save.clicked.connect(partial(self.save_clicked, commit_status=True))\n self.btn_cancel.clicked.connect(self.cancel_clicked)\n self.btn_exit.clicked.connect(self.exit_clicked)\n\n self.cmb_relationship.currentIndexChanged.connect(\n self.cmb_relationship_current_index_changed\n )\n self.tbl_relationship.itemSelectionChanged.connect(\n self.tbl_relationship_item_selection_changed\n )\n\n self.cb_lyr_bulk_load.stateChanged.connect(self.cb_lyr_bulk_load_state_changed)\n self.cb_lyr_existing.stateChanged.connect(self.cb_lyr_existing_state_changed)\n\n self.cb_autosave.stateChanged.connect(self.cb_autosave_state_changed)\n\n QgsProject.instance().layerWillBeRemoved.connect(self.layers_removed)\n\n def add_building_lyrs(self):\n \"\"\"Add building layers\"\"\"\n\n path = os.path.join(os.path.dirname(os.path.dirname(__file__)), \"styles/\")\n\n self.lyr_facilities = self.layer_registry.add_postgres_layer(\n \"facilities\",\n \"facilities\",\n \"shape\",\n \"facilities\",\n \"\",\n \"\",\n )\n self.lyr_facilities.loadNamedStyle(path + \"facilities.qml\")\n\n self.lyr_related_existing = self.layer_registry.add_postgres_layer(\n \"related_existing_outlines\",\n \"related_existing_outlines\",\n \"shape\",\n \"buildings_bulk_load\",\n \"building_outline_id\",\n \"\",\n )\n self.lyr_related_existing.loadNamedStyle(path + \"building_purple_existing.qml\")\n\n self.lyr_related_bulk_load = self.layer_registry.add_postgres_layer(\n \"related_bulk_load_outlines\",\n \"related_bulk_load_outlines\",\n \"shape\",\n \"buildings_bulk_load\",\n \"bulk_load_outline_id\",\n \"\",\n )\n self.lyr_related_bulk_load.loadNamedStyle(path + \"building_purple.qml\")\n\n self.lyr_matched_existing = self.layer_registry.add_postgres_layer(\n \"matched_existing_outlines\",\n \"matched_existing_outlines\",\n \"shape\",\n \"buildings_bulk_load\",\n \"building_outline_id\",\n \"\",\n )\n self.lyr_matched_existing.loadNamedStyle(path + \"building_blue_existing.qml\")\n\n self.lyr_matched_bulk_load = self.layer_registry.add_postgres_layer(\n \"matched_bulk_load_outlines\",\n \"matched_bulk_load_outlines\",\n \"shape\",\n \"buildings_bulk_load\",\n \"bulk_load_outline_id\",\n \"\",\n )\n self.lyr_matched_bulk_load.loadNamedStyle(path + \"building_blue.qml\")\n\n self.lyr_removed_existing = self.layer_registry.add_postgres_layer(\n \"removed_outlines\",\n \"removed_outlines\",\n \"shape\",\n \"buildings_bulk_load\",\n \"building_outline_id\",\n \"\",\n )\n self.lyr_removed_existing.loadNamedStyle(path + \"building_red_existing.qml\")\n\n self.lyr_added_bulk_load = self.layer_registry.add_postgres_layer(\n \"added_outlines\",\n \"added_outlines\",\n \"shape\",\n \"buildings_bulk_load\",\n \"bulk_load_outline_id\",\n \"\",\n )\n self.lyr_added_bulk_load.loadNamedStyle(path + \"building_green.qml\")\n\n self.lyr_related_bulk_load_in_edit = self.layer_registry.add_postgres_layer(\n \"related_bulk_load_in_edit\",\n \"bulk_load_outlines\",\n \"shape\",\n \"buildings_bulk_load\",\n \"bulk_load_outline_id\",\n \"\",\n )\n self.lyr_related_bulk_load_in_edit.loadNamedStyle(path + \"building_purple.qml\")\n\n self.lyr_related_existing_in_edit = self.layer_registry.add_postgres_layer(\n \"related_existing_in_edit\",\n \"existing_subset_extracts\",\n \"shape\",\n \"buildings_bulk_load\",\n 
\"building_outline_id\",\n \"\",\n )\n self.lyr_related_existing_in_edit.loadNamedStyle(path + \"building_purple.qml\")\n\n self.lyr_matched_bulk_load_in_edit = self.layer_registry.add_postgres_layer(\n \"matched_bulk_load_in_edit\",\n \"bulk_load_outlines\",\n \"shape\",\n \"buildings_bulk_load\",\n \"bulk_load_outline_id\",\n \"\",\n )\n self.lyr_matched_bulk_load_in_edit.loadNamedStyle(path + \"building_blue.qml\")\n\n self.lyr_matched_existing_in_edit = self.layer_registry.add_postgres_layer(\n \"matched_existing_in_edit\",\n \"existing_subset_extracts\",\n \"shape\",\n \"buildings_bulk_load\",\n \"building_outline_id\",\n \"\",\n )\n self.lyr_matched_existing_in_edit.loadNamedStyle(path + \"building_blue.qml\")\n\n self.lyr_removed_existing_in_edit = self.layer_registry.add_postgres_layer(\n \"removed_existing_in_edit\",\n \"existing_subset_extracts\",\n \"shape\",\n \"buildings_bulk_load\",\n \"building_outline_id\",\n \"\",\n )\n self.lyr_removed_existing_in_edit.loadNamedStyle(path + \"building_red.qml\")\n\n self.lyr_added_bulk_load_in_edit = self.layer_registry.add_postgres_layer(\n \"added_bulk_load_in_edit\",\n \"bulk_load_outlines\",\n \"shape\",\n \"buildings_bulk_load\",\n \"bulk_load_outline_id\",\n \"\",\n )\n self.lyr_added_bulk_load_in_edit.loadNamedStyle(path + \"building_green.qml\")\n\n self.lyr_existing = self.layer_registry.add_postgres_layer(\n \"existing_subset_extracts\",\n \"existing_subset_extracts\",\n \"shape\",\n \"buildings_bulk_load\",\n \"building_outline_id\",\n \"supplied_dataset_id = {0}\".format(self.current_dataset),\n )\n self.lyr_existing.loadNamedStyle(path + \"building_transparent.qml\")\n\n self.lyr_bulk_load = self.layer_registry.add_postgres_layer(\n \"bulk_load_outlines\",\n \"bulk_load_outlines\",\n \"shape\",\n \"buildings_bulk_load\",\n \"bulk_load_outline_id\",\n \"supplied_dataset_id = {0}\".format(self.current_dataset),\n )\n self.lyr_bulk_load.loadNamedStyle(path + \"buildings_bulk_load_alter_rel.qml\")\n\n def repaint_view(self):\n \"\"\"Repaint views to update changes in result\"\"\"\n self.lyr_added_bulk_load.triggerRepaint()\n self.lyr_removed_existing.triggerRepaint()\n self.lyr_matched_bulk_load.triggerRepaint()\n self.lyr_matched_existing.triggerRepaint()\n self.lyr_related_bulk_load.triggerRepaint()\n self.lyr_related_existing.triggerRepaint()\n\n def clear_layer_filter(self):\n \"\"\"Returns 'null' filter for layers\"\"\"\n self.lyr_added_bulk_load_in_edit.setSubsetString(\"null\")\n self.lyr_removed_existing_in_edit.setSubsetString(\"null\")\n self.lyr_matched_existing_in_edit.setSubsetString(\"null\")\n self.lyr_matched_bulk_load_in_edit.setSubsetString(\"null\")\n self.lyr_related_existing_in_edit.setSubsetString(\"null\")\n self.lyr_related_bulk_load_in_edit.setSubsetString(\"null\")\n\n self.lyr_added_bulk_load.setSubsetString(\"\")\n self.lyr_removed_existing.setSubsetString(\"\")\n self.lyr_matched_existing.setSubsetString(\"\")\n self.lyr_matched_bulk_load.setSubsetString(\"\")\n self.lyr_related_existing.setSubsetString(\"\")\n self.lyr_related_bulk_load.setSubsetString(\"\")\n\n def setup_message_box(self):\n self.msgbox = QMessageBox(\n QMessageBox.Question,\n \"Auto-save\",\n \"Are you sure you want to turn on auto-save?\",\n buttons=QMessageBox.No | QMessageBox.Yes,\n )\n\n def on_dockwidget_closed(self):\n \"\"\"Remove highlight when the dockwideget closes\"\"\"\n pass\n\n def maptool_clicked(self):\n canvas = iface.mapCanvas()\n self.tool = MultiLayerSelection(canvas)\n canvas.setMapTool(self.tool)\n # set up 
signal and slot\n self.tool.multi_selection_changed.connect(self.multi_selection_changed)\n\n def multi_selection_changed(self):\n\n self.tbl_relationship.itemSelectionChanged.disconnect(\n self.tbl_relationship_item_selection_changed\n )\n self.tbl_relationship.clearSelection()\n\n self.lst_existing.clear()\n self.lst_bulk.clear()\n\n self.lst_existing_attrs.clear()\n self.lst_bulk_attrs.clear()\n\n self.reset_buttons()\n\n selected_bulk = [feat.id() for feat in self.lyr_bulk_load.selectedFeatures()]\n selected_existing = [feat.id() for feat in self.lyr_existing.selectedFeatures()]\n\n has_multi_set = False\n has_added, has_removed, has_matched, has_related = False, False, False, False\n existing_to_lst, bulk_to_list = [], []\n bulk_attr_to_list = []\n\n for feat_id in selected_bulk:\n if feat_id in bulk_to_list:\n continue\n id_added = self.find_added_outlines(feat_id)\n id_matched = self.find_matched_existing_outlines(feat_id)\n ids_existing, ids_bulk = self.find_related_existing_outlines(feat_id)\n if id_added:\n if has_matched or has_related:\n self.multi_relationship_selected_error_msg()\n self.tbl_relationship.itemSelectionChanged.connect(\n self.tbl_relationship_item_selection_changed\n )\n return\n bulk_to_list.append(feat_id)\n bulk_attr_to_list.append(id_added)\n has_added = True\n elif id_matched:\n if has_added or has_removed or has_related:\n self.multi_relationship_selected_error_msg()\n self.tbl_relationship.itemSelectionChanged.connect(\n self.tbl_relationship_item_selection_changed\n )\n return\n if has_matched:\n has_multi_set = True\n existing_to_lst = [id_matched[0]]\n bulk_to_list = [feat_id]\n has_matched = True\n elif ids_existing and ids_bulk:\n if has_added or has_removed or has_matched:\n self.multi_relationship_selected_error_msg()\n self.tbl_relationship.itemSelectionChanged.connect(\n self.tbl_relationship_item_selection_changed\n )\n return\n if has_related:\n has_multi_set = True\n existing_to_lst = ids_existing\n bulk_to_list = ids_bulk\n has_related = True\n\n for feat_id in selected_existing:\n if feat_id in existing_to_lst:\n continue\n id_removed = self.find_removed_outlines(feat_id)\n id_matched = self.find_matched_bulk_load_outlines(feat_id)\n ids_existing, ids_bulk = self.find_related_bulk_load_outlines(feat_id)\n if id_removed:\n if has_matched or has_related:\n self.multi_relationship_selected_error_msg()\n self.tbl_relationship.itemSelectionChanged.connect(\n self.tbl_relationship_item_selection_changed\n )\n return\n existing_to_lst.append(feat_id)\n has_removed = True\n elif id_matched:\n if has_added or has_removed or has_related:\n self.multi_relationship_selected_error_msg()\n self.tbl_relationship.itemSelectionChanged.connect(\n self.tbl_relationship_item_selection_changed\n )\n return\n if has_matched:\n has_multi_set = True\n existing_to_lst = [feat_id]\n bulk_to_list = [id_matched[1]]\n has_matched = True\n elif ids_existing and ids_bulk:\n if has_added or has_removed or has_matched:\n self.multi_relationship_selected_error_msg()\n self.tbl_relationship.itemSelectionChanged.connect(\n self.tbl_relationship_item_selection_changed\n )\n return\n if has_related:\n has_multi_set = True\n existing_to_lst = ids_existing\n bulk_to_list = ids_bulk\n has_related = True\n self.insert_into_list(self.lst_existing, existing_to_lst)\n self.insert_into_list(self.lst_bulk, bulk_to_list)\n self.disable_listwidget(self.lst_existing)\n self.disable_listwidget(self.lst_bulk)\n self.lyr_existing.selectByIds(existing_to_lst)\n 
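# mirror the consolidated selection on the bulk load layer\n 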
self.lyr_bulk_load.selectByIds(bulk_to_list)\n\n # error msg when more than one matched or related set is selected\n if has_multi_set:\n self.message_bar_edit.pushMessage(\n \"Multiple matched or related sets selected; only one can be unlinked at a time.\"\n )\n # enable the buttons relevant to the selected relationship\n if has_matched or has_related:\n self.btn_unlink.setEnabled(True)\n self.btn_copy_from_existing.setEnabled(True)\n self.btn_set_attributes.setEnabled(True)\n self.btn_delete_attributes.setEnabled(True)\n self.ledit_name.setEnabled(True)\n self.cbox_use.setEnabled(True)\n self.switch_btn_match_and_related()\n elif has_added and has_removed:\n self.switch_btn_match_and_related()\n elif has_added and not has_removed:\n self.btn_delete.setEnabled(True)\n self.btn_set_attributes.setEnabled(True)\n self.btn_delete_attributes.setEnabled(True)\n self.ledit_name.setEnabled(True)\n self.cbox_use.setEnabled(True)\n # select rows in tbl_relationship\n self.tbl_relationship.setSelectionMode(QAbstractItemView.MultiSelection)\n if has_removed:\n for id_existing in existing_to_lst:\n self.select_row_in_tbl_removed(id_existing)\n elif has_added:\n for id_bulk in bulk_to_list:\n self.select_row_in_tbl_added(id_bulk)\n elif has_matched:\n self.select_row_in_tbl_matched(existing_to_lst[0], bulk_to_list[0])\n elif has_related:\n for id_existing in existing_to_lst:\n for id_bulk in bulk_to_list:\n self.select_row_in_tbl_related(id_existing, id_bulk)\n self.tbl_relationship.setSelectionMode(QAbstractItemView.SingleSelection)\n\n # Add attributes to the lists for display\n if has_removed:\n for id_ in existing_to_lst:\n for row in range(self.tbl_relationship.rowCount()):\n if id_ == int(self.tbl_relationship.item(row, 0).text()):\n existing_use = self.tbl_relationship.item(row, 2).text()\n existing_name = self.tbl_relationship.item(row, 3).text()\n self.insert_into_list(\n self.lst_existing_attrs,\n [[id_, existing_use, existing_name]],\n )\n break\n # if removed and added outlines are both selected, the added attributes must be extracted differently because they come from a different table\n if has_added:\n attr_dict = {}\n for item in bulk_attr_to_list:\n added_id = int(\n item[0].replace(\"(\", \"\").replace(\")\", \"\").split(\",\")[0]\n )\n added_use = item[1]\n added_name = item[2]\n attr_dict[added_id] = [added_use, added_name]\n for id_ in bulk_to_list:\n bulk_use = attr_dict[id_][0]\n bulk_name = attr_dict[id_][1]\n self.insert_into_list(\n self.lst_bulk_attrs, [[id_, bulk_use, bulk_name]]\n )\n self.update_attr_list_item_color(QColor(\"#ff2b01\"), QColor(\"#3f9800\"))\n elif has_added:\n for id_ in bulk_to_list:\n for row in range(self.tbl_relationship.rowCount()):\n if id_ == int(self.tbl_relationship.item(row, 0).text()):\n bulk_use = self.tbl_relationship.item(row, 1).text()\n bulk_name = self.tbl_relationship.item(row, 2).text()\n self.insert_into_list(\n self.lst_bulk_attrs, [[id_, bulk_use, bulk_name]]\n )\n break\n self.update_attr_list_item_color(QColor(\"#ff2b01\"), QColor(\"#3f9800\"))\n elif has_matched:\n for id_ in existing_to_lst:\n for row in range(self.tbl_relationship.rowCount()):\n if id_ == int(self.tbl_relationship.item(row, 0).text()):\n existing_use = self.tbl_relationship.item(row, 3).text()\n existing_name = self.tbl_relationship.item(row, 4).text()\n self.insert_into_list(\n self.lst_existing_attrs,\n [[id_, existing_use, existing_name]],\n )\n break\n 
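# look up the attributes of the matched bulk load outline from the table\n 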
for id_ in bulk_to_list:\n for row in range(self.tbl_relationship.rowCount()):\n if id_ == int(self.tbl_relationship.item(row, 1).text()):\n bulk_load_use = self.tbl_relationship.item(row, 5).text()\n bulk_load_name = self.tbl_relationship.item(row, 6).text()\n self.insert_into_list(\n self.lst_bulk_attrs, [[id_, bulk_load_use, bulk_load_name]]\n )\n break\n self.update_attr_list_item_color(QColor(\"#00b4d4\"), QColor(\"#00b4d4\"))\n elif has_related:\n for id_ in existing_to_lst:\n for row in range(self.tbl_relationship.rowCount()):\n if id_ == int(self.tbl_relationship.item(row, 1).text()):\n existing_use = self.tbl_relationship.item(row, 4).text()\n existing_name = self.tbl_relationship.item(row, 5).text()\n self.insert_into_list(\n self.lst_existing_attrs,\n [[id_, existing_use, existing_name]],\n )\n break\n for id_ in bulk_to_list:\n for row in range(self.tbl_relationship.rowCount()):\n if id_ == int(self.tbl_relationship.item(row, 2).text()):\n bulk_load_use = self.tbl_relationship.item(row, 6).text()\n bulk_load_name = self.tbl_relationship.item(row, 7).text()\n self.insert_into_list(\n self.lst_bulk_attrs, [[id_, bulk_load_use, bulk_load_name]]\n )\n break\n self.update_attr_list_item_color(QColor(\"#e601ff\"), QColor(\"#e601ff\"))\n\n # Change item color in the lists\n if has_removed or has_added:\n self.update_list_item_color(QColor(\"#ff2b01\"), QColor(\"#3f9800\"))\n elif has_matched:\n self.update_list_item_color(QColor(\"#00b4d4\"), QColor(\"#00b4d4\"))\n elif has_related:\n self.update_list_item_color(QColor(\"#e601ff\"), QColor(\"#e601ff\"))\n\n self.tbl_relationship.itemSelectionChanged.connect(\n self.tbl_relationship_item_selection_changed\n )\n\n def unfinished_error_msg(self):\n self.error_dialog = ErrorDialog()\n self.error_dialog.fill_report(\n \"\\n------------- UNFINISHED PROCESS -------------\"\n \"\\n\\nPlease click Save or Cancel to finish before continuing.\"\n )\n self.error_dialog.show()\n\n def unlink_clicked(self, commit_status=True):\n \"\"\"\n Unlink the buildings in the table\n Called when the unlink button is clicked\n \"\"\"\n self.btn_unlink.setEnabled(False)\n self.btn_maptool.setEnabled(False)\n self.qa_button_set_enable(False)\n\n ids_existing = self.get_ids_from_lst(self.lst_existing)\n ids_bulk = self.get_ids_from_lst(self.lst_bulk)\n self.insert_into_lyr_removed_in_edit(ids_existing)\n self.insert_into_lyr_added_in_edit(ids_bulk)\n\n self.connect_to_error_msg()\n\n self.lyr_existing.removeSelection()\n self.lyr_bulk_load.removeSelection()\n\n if self.autosave:\n self.save_clicked(commit_status)\n else:\n self.btn_save.setEnabled(True)\n\n def matched_clicked(self, commit_status=True):\n \"\"\"\n Match the buildings in the list\n Called when the matched button is clicked\n \"\"\"\n if self.lst_existing.count() == 1 and self.lst_bulk.count() == 1:\n self.btn_matched.setEnabled(False)\n self.btn_delete.setEnabled(False)\n self.btn_maptool.setEnabled(False)\n self.qa_button_set_enable(False)\n\n id_existing = int(self.lst_existing.item(0).text())\n id_bulk = int(self.lst_bulk.item(0).text())\n\n self.insert_into_lyr_matched_existing_in_edit(id_existing)\n self.insert_into_lyr_matched_bulk_load_in_edit(id_bulk)\n\n self.delete_original_relationship_in_existing(id_existing)\n self.delete_original_relationship_in_bulk_load(id_bulk)\n\n self.connect_to_error_msg()\n\n self.lyr_existing.removeSelection()\n self.lyr_bulk_load.removeSelection()\n\n if self.autosave:\n self.save_clicked(commit_status)\n else:\n self.btn_save.setEnabled(True)\n\n 
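# NOTE: matched_clicked covers the strict one-to-one case; related_clicked below handles one-to-many and many-to-many selections\n 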
def related_clicked(self, commit_status=True):\n \"\"\"\n Relate the buildings in the list\n Called when the related button is clicked\n \"\"\"\n if self.lst_existing.count() == 0 or self.lst_bulk.count() == 0:\n pass\n elif self.lst_existing.count() == 1 and self.lst_bulk.count() == 1:\n pass\n else:\n self.btn_related.setEnabled(False)\n self.btn_delete.setEnabled(False)\n self.btn_maptool.setEnabled(False)\n self.qa_button_set_enable(False)\n\n for row in range(self.lst_existing.count()):\n id_existing = int(self.lst_existing.item(row).text())\n\n self.insert_into_lyr_related_existing_in_edit(id_existing)\n self.delete_original_relationship_in_existing(id_existing)\n\n for row in range(self.lst_bulk.count()):\n id_bulk = int(self.lst_bulk.item(row).text())\n\n self.insert_into_lyr_related_bulk_load_in_edit(id_bulk)\n self.delete_original_relationship_in_bulk_load(id_bulk)\n\n self.connect_to_error_msg()\n\n self.lyr_existing.removeSelection()\n self.lyr_bulk_load.removeSelection()\n\n if self.autosave:\n self.save_clicked(commit_status)\n else:\n self.btn_save.setEnabled(True)\n\n def delete_clicked(self, commit_status=True):\n self.deletion_reason = DeletionReason(self.lst_bulk.count())\n self.deletion_reason.show()\n self.deletion_reason.btn_ok.clicked.connect(\n partial(self.reason_given, commit_status)\n )\n self.deletion_reason.btn_cancel.clicked.connect(self.reason_cancel)\n\n def reason_given(self, commit_status):\n self.deletion_reason.close()\n if self.deletion_reason.le_reason.text() != \"\":\n self.btn_matched.setEnabled(False)\n self.btn_related.setEnabled(False)\n self.delete = True\n self.reason_text = self.deletion_reason.le_reason.text()\n self.connect_to_error_msg()\n self.btn_delete.setEnabled(False)\n if self.autosave:\n self.save_clicked(commit_status)\n else:\n self.btn_save.setEnabled(True)\n else:\n iface.messageBar().pushMessage(\n \"ERROR\",\n \"Please ensure that you enter a reason for deletion; you cannot delete a building otherwise.\",\n level=Qgis.Info,\n duration=5,\n )\n\n def reason_cancel(self):\n self.deletion_reason.close()\n\n def on_click_btn_copy_from_existing(self):\n selected_existing_outlines = self.get_lst_content(self.lst_existing_attrs)\n existing_uses = [row[1][1] for row in selected_existing_outlines]\n existing_names = [row[1][2] for row in selected_existing_outlines]\n non_null_pairs = [\n pair\n for pair in zip(existing_uses, existing_names)\n if pair != (\"None\", \"None\")\n ]\n if non_null_pairs:\n existing_use = non_null_pairs[0][0]\n use_id = self.valid_building_use_ids[existing_use]\n self.cbox_use.setCurrentIndex(use_id)\n existing_name = non_null_pairs[0][1]\n self.ledit_name.setText(existing_name)\n else:\n self.cbox_use.setCurrentIndex(0)\n self.ledit_name.setText(\"\")\n\n def on_click_btn_set_attributes(self, commit_status=True):\n use = self.cbox_use.currentText()\n name = self.ledit_name.text()\n name = \"None\" if name == \"\" else name\n if use == \"None\" and name != \"None\":\n self.error_dialog = ErrorDialog()\n self.error_dialog.fill_report(\n \"An outline cannot have a name without a use. 
Please select a value for use.\"\n )\n self.error_dialog.show()\n return\n for row_id, row_content in self.get_lst_content(self.lst_bulk_attrs):\n updated_row_content = [row_content[0], use, name]\n self.lst_bulk_attrs.item(row_id).setText(str(updated_row_content))\n self.connect_to_error_msg()\n self.attributes_changed = True\n if self.autosave:\n self.save_clicked(commit_status)\n else:\n self.btn_save.setEnabled(True)\n\n def on_click_btn_delete_attributes(self, commit_status=True):\n for row_id, row_content in self.get_lst_content(self.lst_bulk_attrs):\n updated_row_content = [row_content[0], \"None\", \"None\"]\n self.lst_bulk_attrs.item(row_id).setText(str(updated_row_content))\n self.connect_to_error_msg()\n self.attributes_changed = True\n if self.autosave:\n self.save_clicked(commit_status)\n else:\n self.btn_save.setEnabled(True)\n\n def save_clicked(self, commit_status=True):\n \"\"\"\n Save result and change database\n Called when save button is clicked\n \"\"\"\n self.db.open_cursor()\n\n if self.delete:\n for row in range(self.lst_bulk.count()):\n feat_id = int(self.lst_bulk.item(row).text())\n # remove outline from added table\n sql = \"SELECT buildings_bulk_load.added_delete_bulk_load_outlines(%s);\"\n self.db.execute_no_commit(sql, (feat_id,))\n # change status id\n sql = \"SELECT buildings_bulk_load.bulk_load_outlines_update_bulk_load_status(%s, %s);\"\n self.db.execute_no_commit(sql, (feat_id, 3))\n # insert reason for deletion\n sql = \"SELECT buildings_bulk_load.deletion_description_insert(%s, %s);\"\n self.db.execute_no_commit(sql, (feat_id, self.reason_text))\n self.reason_text = \"\"\n self.delete = False\n elif self.attributes_changed:\n self.update_bulkload_attributes()\n self.attributes_changed = False\n else:\n self.delete_original_relationships()\n self.insert_new_added_outlines()\n self.insert_new_removed_outlines()\n self.insert_new_matched_outlines()\n self.insert_new_related_outlines()\n\n if commit_status:\n self.db.commit_open_cursor()\n\n self.lst_existing.clear()\n self.lst_bulk.clear()\n\n self.lst_existing_attrs.clear()\n self.lst_bulk_attrs.clear()\n\n self.reset_buttons()\n self.qa_button_set_enable(True)\n\n self.disconnect_to_error_msg()\n\n self.repaint_view()\n self.clear_layer_filter()\n iface.mapCanvas().refreshAllLayers()\n\n self.refresh_tbl_relationship()\n\n def cancel_clicked(self):\n self.reset_buttons()\n self.qa_button_set_enable(True)\n self.lst_existing.clear()\n self.lst_bulk.clear()\n\n self.lst_existing_attrs.clear()\n self.lst_bulk_attrs.clear()\n\n self.lyr_existing.removeSelection()\n self.lyr_bulk_load.removeSelection()\n try:\n self.disconnect_to_error_msg()\n except TypeError:\n pass\n\n self.repaint_view()\n self.clear_layer_filter()\n iface.mapCanvas().refreshAllLayers()\n\n def exit_clicked(self):\n \"\"\"\n Called when alter building relationships exit button clicked.\n \"\"\"\n self.close_frame()\n\n def close_frame(self):\n \"\"\"\n Clean up and remove the alter building relationships frame.\n \"\"\"\n self.reset_buttons()\n self.qa_button_set_enable(True)\n self.lst_existing.clear()\n self.lst_bulk.clear()\n\n self.lst_existing_attrs.clear()\n self.lst_bulk_attrs.clear()\n\n if self.change_instance is not None:\n self.edit_dialog.close()\n\n QgsProject.instance().layerWillBeRemoved.disconnect(self.layers_removed)\n for val in [\n str(layer.id())\n for layer in QgsProject.instance().layerTreeRoot().layerOrder()\n ]:\n if \"existing_subset_extracts\" in val:\n self.lyr_existing.removeSelection()\n if 
\"bulk_load_outlines\" in val:\n self.lyr_bulk_load.removeSelection()\n try:\n self.disconnect_to_error_msg()\n except TypeError:\n pass\n\n self.layer_registry.remove_layer(self.lyr_existing)\n self.layer_registry.remove_layer(self.lyr_bulk_load)\n self.layer_registry.remove_layer(self.lyr_added_bulk_load)\n self.layer_registry.remove_layer(self.lyr_removed_existing)\n self.layer_registry.remove_layer(self.lyr_matched_existing)\n self.layer_registry.remove_layer(self.lyr_matched_bulk_load)\n self.layer_registry.remove_layer(self.lyr_related_bulk_load)\n self.layer_registry.remove_layer(self.lyr_related_existing)\n self.layer_registry.remove_layer(self.lyr_added_bulk_load_in_edit)\n self.layer_registry.remove_layer(self.lyr_removed_existing_in_edit)\n self.layer_registry.remove_layer(self.lyr_matched_existing_in_edit)\n self.layer_registry.remove_layer(self.lyr_matched_bulk_load_in_edit)\n self.layer_registry.remove_layer(self.lyr_related_existing_in_edit)\n self.layer_registry.remove_layer(self.lyr_related_bulk_load_in_edit)\n self.layer_registry.remove_layer(self.lyr_facilities)\n\n for action in iface.building_toolbar.actions():\n if action.text() not in [\"Pan Map\"]:\n iface.building_toolbar.removeAction(action)\n iface.building_toolbar.hide()\n\n from buildings.gui.bulk_load_frame import BulkLoadFrame\n\n dw = self.dockwidget\n dw.stk_options.removeWidget(dw.stk_options.currentWidget())\n dw.new_widget(BulkLoadFrame(dw))\n iface.actionPan().trigger()\n\n def cmb_relationship_current_index_changed(self):\n current_text = self.cmb_relationship.currentText()\n if current_text == \"Related Outlines\":\n self.init_tbl_relationship(\n [\n \"Group\",\n \"Exist ID\",\n \"Bulk ID\",\n \"QA Status\",\n \"Exist Use\",\n \"Exist Name\",\n \"Bulk Use\",\n \"Bulk Name\",\n ]\n )\n self.populate_tbl_related()\n self.btn_next.setEnabled(True)\n self.btn_qa_not_removed.setEnabled(False)\n if self.is_empty_tbl_relationship(\"Related Outlines\"):\n self.qa_button_set_enable(False)\n else:\n self.qa_button_set_enable(True)\n elif current_text == \"Matched Outlines\":\n self.init_tbl_relationship(\n [\n \"Exist ID\",\n \"Bulk ID\",\n \"QA Status\",\n \"Exist Use\",\n \"Exist Name\",\n \"Bulk Use\",\n \"Bulk Name\",\n ]\n )\n self.populate_tbl_matched()\n self.btn_next.setEnabled(True)\n self.btn_qa_not_removed.setEnabled(False)\n if self.is_empty_tbl_relationship(\"Matched Outlines\"):\n self.qa_button_set_enable(False)\n else:\n self.qa_button_set_enable(True)\n elif current_text == \"Removed Outlines\":\n self.init_tbl_relationship(\n [\"Exist ID\", \"QA Status\", \"Exist Use\", \"Exist Name\"]\n )\n self.populate_tbl_removed()\n self.btn_next.setEnabled(True)\n self.btn_qa_not_removed.setEnabled(True)\n if self.is_empty_tbl_relationship(\"Removed Outlines\"):\n self.qa_button_set_enable(False)\n self.btn_qa_not_removed.setEnabled(False)\n else:\n self.qa_button_set_enable(True)\n elif current_text == \"Added Outlines\":\n self.init_tbl_relationship([\"Bulk ID\", \"Bulk Use\", \"Bulk Name\"])\n self.populate_tbl_added()\n self.btn_qa_not_removed.setEnabled(False)\n self.qa_button_set_enable(False)\n self.btn_next.setEnabled(False)\n\n elif current_text == \"\":\n self.tbl_relationship.setRowCount(0)\n self.tbl_relationship.setColumnCount(0)\n self.qa_button_set_enable(False)\n self.btn_qa_not_removed.setEnabled(False)\n self.btn_next.setEnabled(False)\n\n self.disable_tbl_editing(self.tbl_relationship)\n\n def tbl_relationship_item_selection_changed(self):\n\n self.lst_existing.clear()\n 
self.lst_bulk.clear()\n\n self.lst_existing_attrs.clear()\n self.lst_bulk_attrs.clear()\n\n if self.has_no_selection_in_table(self.tbl_relationship):\n self.lyr_existing.removeSelection()\n self.lyr_bulk_load.removeSelection()\n return\n\n row = self.tbl_relationship.selectionModel().selectedRows()[0].row()\n current_text = self.cmb_relationship.currentText()\n\n if current_text == \"Related Outlines\":\n id_existing = int(self.tbl_relationship.item(row, 1).text())\n id_bulk = int(self.tbl_relationship.item(row, 2).text())\n ids_existing, ids_bulk = self.find_related_existing_outlines(id_bulk)\n self.insert_into_list(self.lst_existing, ids_existing)\n self.insert_into_list(self.lst_bulk, ids_bulk)\n self.update_list_item_color(QColor(\"#e601ff\"), QColor(\"#e601ff\"))\n self.lyr_existing.selectByIds(ids_existing)\n self.lyr_bulk_load.selectByIds(ids_bulk)\n self.btn_unlink.setEnabled(True)\n\n # Add related attributes to list for displaying\n for id_ in ids_existing:\n for row in range(self.tbl_relationship.rowCount()):\n if id_ == int(self.tbl_relationship.item(row, 1).text()):\n existing_use = self.tbl_relationship.item(row, 4).text()\n existing_name = self.tbl_relationship.item(row, 5).text()\n self.insert_into_list(\n self.lst_existing_attrs,\n [[id_, existing_use, existing_name]],\n )\n break\n for id_ in ids_bulk:\n for row in range(self.tbl_relationship.rowCount()):\n if id_ == int(self.tbl_relationship.item(row, 2).text()):\n bulk_load_use = self.tbl_relationship.item(row, 6).text()\n bulk_load_name = self.tbl_relationship.item(row, 7).text()\n self.insert_into_list(\n self.lst_bulk_attrs, [[id_, bulk_load_use, bulk_load_name]]\n )\n break\n self.update_attr_list_item_color(QColor(\"#e601ff\"), QColor(\"#e601ff\"))\n\n elif current_text == \"Matched Outlines\":\n row = self.tbl_relationship.selectionModel().selectedRows()[0].row()\n id_existing = int(self.tbl_relationship.item(row, 0).text())\n id_bulk = int(self.tbl_relationship.item(row, 1).text())\n\n ids_existing = [id_existing]\n ids_bulk = [id_bulk]\n\n self.insert_into_list(self.lst_existing, ids_existing)\n self.insert_into_list(self.lst_bulk, ids_bulk)\n self.update_list_item_color(QColor(\"#00b4d4\"), QColor(\"#00b4d4\"))\n self.lyr_existing.selectByIds(ids_existing)\n self.lyr_bulk_load.selectByIds(ids_bulk)\n self.btn_unlink.setEnabled(True)\n\n # Add matched attributes to list for displaying\n for id_ in ids_existing:\n for row in range(self.tbl_relationship.rowCount()):\n if id_ == int(self.tbl_relationship.item(row, 0).text()):\n existing_use = self.tbl_relationship.item(row, 3).text()\n existing_name = self.tbl_relationship.item(row, 4).text()\n self.insert_into_list(\n self.lst_existing_attrs,\n [[id_, existing_use, existing_name]],\n )\n break\n for id_ in ids_bulk:\n for row in range(self.tbl_relationship.rowCount()):\n if id_ == int(self.tbl_relationship.item(row, 1).text()):\n bulk_load_use = self.tbl_relationship.item(row, 5).text()\n bulk_load_name = self.tbl_relationship.item(row, 6).text()\n self.insert_into_list(\n self.lst_bulk_attrs, [[id_, bulk_load_use, bulk_load_name]]\n )\n break\n self.update_attr_list_item_color(QColor(\"#00b4d4\"), QColor(\"#00b4d4\"))\n\n elif current_text == \"Removed Outlines\":\n id_existing = int(self.tbl_relationship.item(row, 0).text())\n self.insert_into_list(self.lst_existing, [id_existing])\n self.update_list_item_color(QColor(\"#ff2b01\"), QColor(\"#3f9800\"))\n self.lyr_existing.selectByIds([id_existing])\n self.lyr_bulk_load.selectByIds([])\n\n # Add removed 
attributes to the lists for display\n for id_ in [id_existing]:\n for row in range(self.tbl_relationship.rowCount()):\n if id_ == int(self.tbl_relationship.item(row, 0).text()):\n existing_use = self.tbl_relationship.item(row, 2).text()\n existing_name = self.tbl_relationship.item(row, 3).text()\n self.insert_into_list(\n self.lst_existing_attrs,\n [[id_, existing_use, existing_name]],\n )\n self.update_attr_list_item_color(QColor(\"#ff2b01\"), QColor(\"#3f9800\"))\n\n elif current_text == \"Added Outlines\":\n id_bulk = int(self.tbl_relationship.item(row, 0).text())\n self.insert_into_list(self.lst_bulk, [id_bulk])\n self.update_list_item_color(QColor(\"#ff2b01\"), QColor(\"#3f9800\"))\n self.lyr_bulk_load.selectByIds([id_bulk])\n self.btn_delete.setEnabled(True)\n\n # Add added attributes to the lists for display\n for id_ in [id_bulk]:\n for row in range(self.tbl_relationship.rowCount()):\n if id_ == int(self.tbl_relationship.item(row, 0).text()):\n bulk_use = self.tbl_relationship.item(row, 1).text()\n bulk_name = self.tbl_relationship.item(row, 2).text()\n self.insert_into_list(\n self.lst_bulk_attrs, [[id_, bulk_use, bulk_name]]\n )\n self.update_attr_list_item_color(QColor(\"#ff2b01\"), QColor(\"#3f9800\"))\n\n if self.zoom:\n self.zoom_to_feature()\n\n def btn_qa_status_clicked(self, qa_status, commit_status=True):\n\n selected_rows = [\n index.row()\n for index in self.tbl_relationship.selectionModel().selectedRows()\n ]\n if not selected_rows:\n return\n self.tbl_relationship.itemSelectionChanged.disconnect(\n self.tbl_relationship_item_selection_changed\n )\n self.db.open_cursor()\n\n qa_status_id = self.get_qa_status_id(qa_status)\n current_text = self.cmb_relationship.currentText()\n\n ids_existing, ids_bulk = [], []\n\n if current_text == \"Related Outlines\":\n if qa_status_id == 5:\n return\n qa_column = 3\n for row in selected_rows:\n id_existing = int(self.tbl_relationship.item(row, 1).text())\n id_bulk = int(self.tbl_relationship.item(row, 2).text())\n self.update_qa_status_in_related(id_existing, id_bulk, qa_status_id)\n ids_existing, ids_bulk = self.find_related_existing_outlines(id_bulk)\n elif current_text == \"Matched Outlines\":\n if qa_status_id == 5:\n return\n qa_column = 2\n for row in selected_rows:\n id_existing = int(self.tbl_relationship.item(row, 0).text())\n id_bulk = int(self.tbl_relationship.item(row, 1).text())\n self.update_qa_status_in_matched(id_existing, id_bulk, qa_status_id)\n ids_existing.append(id_existing)\n ids_bulk.append(id_bulk)\n elif current_text == \"Removed Outlines\":\n qa_column = 1\n selected_ids = []\n for row in selected_rows:\n id_existing = int(self.tbl_relationship.item(row, 0).text())\n selected_ids.append(id_existing)\n self.update_qa_status_in_removed(id_existing, qa_status_id)\n ids_existing.append(id_existing)\n if qa_status_id == 5:\n self.copy_and_match_removed_building()\n self.cmb_relationship.setCurrentIndex(\n self.cmb_relationship.findText(\"Matched Outlines\")\n )\n\n if commit_status:\n self.db.commit_open_cursor()\n\n self.refresh_tbl_relationship()\n self.reset_buttons()\n self.lyr_existing.removeSelection()\n self.lyr_bulk_load.removeSelection()\n self.lst_existing.clear()\n self.lst_bulk.clear()\n\n self.lst_existing_attrs.clear()\n self.lst_bulk_attrs.clear()\n\n self.tbl_relationship.itemSelectionChanged.connect(\n self.tbl_relationship_item_selection_changed\n )\n\n # Move to the next 'not checked'\n if qa_status_id != 5:\n for row in range(max(selected_rows) + 1, 
self.tbl_relationship.rowCount()):\n if self.scroll_to_next(row, qa_column, selected_rows):\n break\n if not self.tbl_relationship.selectionModel().selectedRows():\n self.tbl_relationship.selectRow(max(selected_rows))\n item = self.tbl_relationship.item(max(selected_rows), qa_column)\n self.tbl_relationship.scrollToItem(item)\n elif qa_status_id == 5:\n for row in range(self.tbl_relationship.rowCount()):\n id_existing = int(self.tbl_relationship.item(row, 0).text())\n if id_existing in selected_ids:\n self.zoom = False\n self.tbl_relationship.selectRow(row)\n self.tbl_relationship.scrollToItem(\n self.tbl_relationship.item(row, qa_column)\n )\n self.zoom = True\n break\n if len(selected_ids) > 1:\n self.message_bar_edit.pushMessage(\n \"You cannot have multiple selected matched relationships. \"\n \"The first (ordered numerically) has been selected\"\n )\n\n def zoom_to_next(self):\n found = False\n selected_rows = [\n index.row()\n for index in self.tbl_relationship.selectionModel().selectedRows()\n ]\n if not selected_rows:\n selected_rows = [-1]\n current_text = self.cmb_relationship.currentText()\n if current_text == \"Related Outlines\":\n qa_column = 3\n elif current_text == \"Matched Outlines\":\n qa_column = 2\n elif current_text == \"Removed Outlines\":\n qa_column = 1\n for row in range(max(selected_rows) + 1, self.tbl_relationship.rowCount()):\n if self.scroll_to_next(row, qa_column, selected_rows):\n found = True\n break\n if not found:\n selected_rows = [0]\n for row in range(self.tbl_relationship.rowCount()):\n if self.scroll_to_next(row, qa_column, selected_rows):\n break\n\n def cb_lyr_bulk_load_state_changed(self):\n legend = QgsProject.instance().layerTreeRoot()\n if self.cb_lyr_bulk_load.isChecked():\n legend.findLayer(\n self.lyr_added_bulk_load_in_edit.id()\n ).setItemVisibilityChecked(True)\n legend.findLayer(\n self.lyr_matched_bulk_load_in_edit.id()\n ).setItemVisibilityChecked(True)\n legend.findLayer(\n self.lyr_related_bulk_load_in_edit.id()\n ).setItemVisibilityChecked(True)\n legend.findLayer(self.lyr_added_bulk_load.id()).setItemVisibilityChecked(\n True\n )\n legend.findLayer(self.lyr_matched_bulk_load.id()).setItemVisibilityChecked(\n True\n )\n legend.findLayer(self.lyr_related_bulk_load.id()).setItemVisibilityChecked(\n True\n )\n else:\n legend.findLayer(\n self.lyr_added_bulk_load_in_edit.id()\n ).setItemVisibilityChecked(False)\n legend.findLayer(\n self.lyr_matched_bulk_load_in_edit.id()\n ).setItemVisibilityChecked(False)\n legend.findLayer(\n self.lyr_related_bulk_load_in_edit.id()\n ).setItemVisibilityChecked(False)\n legend.findLayer(self.lyr_added_bulk_load.id()).setItemVisibilityChecked(\n False\n )\n legend.findLayer(self.lyr_matched_bulk_load.id()).setItemVisibilityChecked(\n False\n )\n legend.findLayer(self.lyr_related_bulk_load.id()).setItemVisibilityChecked(\n False\n )\n\n def cb_lyr_existing_state_changed(self):\n legend = QgsProject.instance().layerTreeRoot()\n if self.cb_lyr_existing.isChecked():\n legend.findLayer(\n self.lyr_removed_existing_in_edit.id()\n ).setItemVisibilityChecked(True)\n legend.findLayer(\n self.lyr_matched_existing_in_edit.id()\n ).setItemVisibilityChecked(True)\n legend.findLayer(\n self.lyr_related_existing_in_edit.id()\n ).setItemVisibilityChecked(True)\n legend.findLayer(self.lyr_removed_existing.id()).setItemVisibilityChecked(\n True\n )\n legend.findLayer(self.lyr_matched_existing.id()).setItemVisibilityChecked(\n True\n )\n legend.findLayer(self.lyr_related_existing.id()).setItemVisibilityChecked(\n True\n 
)\n else:\n legend.findLayer(\n self.lyr_removed_existing_in_edit.id()\n ).setItemVisibilityChecked(False)\n legend.findLayer(\n self.lyr_matched_existing_in_edit.id()\n ).setItemVisibilityChecked(False)\n legend.findLayer(\n self.lyr_related_existing_in_edit.id()\n ).setItemVisibilityChecked(False)\n legend.findLayer(self.lyr_removed_existing.id()).setItemVisibilityChecked(\n False\n )\n legend.findLayer(self.lyr_matched_existing.id()).setItemVisibilityChecked(\n False\n )\n legend.findLayer(self.lyr_related_existing.id()).setItemVisibilityChecked(\n False\n )\n\n def cb_autosave_state_changed(self):\n if self.btn_save.isEnabled():\n self.unfinished_error_msg()\n self.cb_autosave.setCheckState(Qt.Unchecked)\n self.autosave = False\n self.btn_save.setVisible(True)\n return\n if self.cb_autosave.isChecked():\n if self.confirm_to_autosave():\n self.autosave = True\n self.btn_save.setVisible(False)\n else:\n self.cb_autosave.setCheckState(Qt.Unchecked)\n self.autosave = False\n self.btn_save.setVisible(True)\n else:\n self.autosave = False\n self.btn_save.setVisible(True)\n\n def layers_removed(self, layerids):\n self.layer_registry.update_layers()\n layers = [\n \"added_bulk_load_in_edit\",\n \"removed_existing_in_edit\",\n \"matched_existing_in_edit\",\n \"matched_bulk_load_in_edit\",\n \"related_existing_in_edit\",\n \"related_bulk_load_in_edit\",\n \"added_outlines\",\n \"removed_outlines\",\n \"matched_existing_outlines\",\n \"matched_bulk_load_outlines\",\n \"related_existing_outlines\",\n \"related_bulk_load_outlines\",\n \"bulk_load_outlines\",\n \"existing_subset_extracts\",\n ]\n for layer in layers:\n if layer in layerids:\n self.cmb_relationship.setDisabled(True)\n self.btn_qa_not_checked.setDisabled(True)\n self.btn_qa_refer2supplier.setDisabled(True)\n self.btn_qa_pending.setDisabled(True)\n self.btn_qa_okay.setDisabled(True)\n self.btn_qa_not_removed.setDisabled(True)\n self.btn_maptool.setDisabled(True)\n self.btn_unlink.setDisabled(True)\n self.btn_matched.setDisabled(True)\n self.btn_related.setDisabled(True)\n self.btn_delete.setDisabled(True)\n self.btn_cancel.setDisabled(True)\n self.btn_save.setDisabled(True)\n self.cb_autosave.setDisabled(True)\n self.cb_lyr_bulk_load.setDisabled(True)\n self.cb_lyr_existing.setDisabled(True)\n 
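# with the frame locked, warn the user that it is unusable until reloaded\n 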
iface.messageBar().pushMessage(\n \"ERROR\",\n \"Required layer removed! Please reload the buildings plugin or the current frame before continuing.\",\n level=Qgis.Critical,\n duration=5,\n )\n return\n\n def copy_and_match_removed_building(self):\n # iterate through all the selected removed buildings\n for feature in self.lyr_existing.selectedFeatures():\n # get geometry\n geometry = self.db.execute_no_commit(\n general_select.convert_geometry, (feature.geometry().asWkt(),)\n )\n geometry = geometry.fetchall()[0][0]\n sql = (\n buildings_select.building_outlines_capture_method_id_by_building_outline_id\n )\n building_outline_id = feature.attributes()[0]\n # get capture method of existing outline\n capture_method = self.db.execute_no_commit(sql, (building_outline_id,))\n capture_method = capture_method.fetchall()[0][0]\n sql = (\n bulk_load_select.bulk_load_outlines_capture_source_by_supplied_dataset_id\n )\n # get capture source of current dataset\n capture_source = self.db.execute_no_commit(sql, (self.current_dataset,))\n capture_source = capture_source.fetchall()[0][0]\n # get suburb, town_city and territorial authority of existing outline\n sql = (\n buildings_select.building_outlines_suburb_locality_id_by_building_outline_id\n )\n suburb = self.db.execute_no_commit(sql, (building_outline_id,))\n suburb = suburb.fetchall()[0][0]\n sql = buildings_select.building_outlines_town_city_id_by_building_outline_id\n town_city = self.db.execute_no_commit(sql, (building_outline_id,))\n town_city = town_city.fetchall()[0][0]\n sql = (\n buildings_select.building_outlines_territorial_authority_id_by_building_outline\n )\n territorial_auth = self.db.execute_no_commit(sql, (building_outline_id,))\n territorial_auth = territorial_auth.fetchall()[0][0]\n # insert outline into buildings_bulk_load.bulk_load_outlines\n sql = \"SELECT buildings_bulk_load.bulk_load_outlines_insert(%s, %s, %s, %s, %s, %s, %s, %s, %s)\"\n bulk_load_id = self.db.execute_no_commit(\n sql,\n (\n self.current_dataset,\n None,\n 2,\n capture_method,\n capture_source,\n suburb,\n town_city,\n territorial_auth,\n geometry,\n ),\n )\n bulk_load_id = bulk_load_id.fetchall()[0][0]\n # remove existing building from removed table\n sql = \"SELECT buildings_bulk_load.removed_delete_existing_outline(%s);\"\n self.db.execute_no_commit(sql, (building_outline_id,))\n # add existing and new building to matched table\n sql = \"SELECT buildings_bulk_load.matched_insert_building_outlines(%s, %s);\"\n self.db.execute_no_commit(sql, (bulk_load_id, building_outline_id))\n # set the qa status of the new match to not checked\n sql = \"SELECT buildings_bulk_load.matched_update_qa_status_id(%s, %s, %s);\"\n self.db.execute_no_commit(sql, (1, building_outline_id, bulk_load_id))\n # refresh to get new outlines\n iface.mapCanvas().refreshAllLayers()\n\n def confirm_to_autosave(self):\n reply = self.msgbox.exec_()\n return reply == QMessageBox.Yes\n\n def switch_btn_match_and_related(self):\n if self.lst_bulk.count() == 0 or self.lst_existing.count() == 0:\n pass\n elif self.lst_bulk.count() == 1 and self.lst_existing.count() == 1:\n self.btn_matched.setEnabled(True)\n self.btn_related.setEnabled(False)\n self.btn_delete.setEnabled(True)\n else:\n self.btn_related.setEnabled(True)\n self.btn_matched.setEnabled(False)\n self.btn_delete.setEnabled(True)\n\n 
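# called from multi_selection_changed when the canvas selection mixes relationship types\n 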
def multi_relationship_selected_error_msg(self):\n self.error_dialog = ErrorDialog()\n self.error_dialog.fill_report(\n \"\\n------------- MULTIPLE RELATIONSHIPS SELECTED -------------\"\n \"\\n\\nThere are multiple relationships selected. Please unlink \"\n \"matched or related outlines before altering relationships.\"\n )\n self.error_dialog.show()\n\n def find_added_outlines(self, id_bulk):\n result = self.db.execute_return(\n bulk_load_select.added_by_bulk_load_outline_id_dataset_id,\n (id_bulk, self.current_dataset),\n )\n return result.fetchone()\n\n def find_removed_outlines(self, id_existing):\n result = self.db.execute_return(\n bulk_load_select.removed_by_existing_outline_id_dataset_id,\n (id_existing, self.current_dataset),\n )\n return result.fetchone()\n\n def find_matched_existing_outlines(self, id_bulk):\n result = self.db.execute_return(\n bulk_load_select.matched_by_bulk_load_outline_id_dataset_id,\n (id_bulk, self.current_dataset),\n )\n return result.fetchone()\n\n def find_matched_bulk_load_outlines(self, id_existing):\n result = self.db.execute_return(\n bulk_load_select.matched_by_existing_outline_id_dataset_id,\n (id_existing, self.current_dataset),\n )\n return result.fetchone()\n\n def find_related_existing_outlines(self, id_bulk):\n ids_existing, ids_bulk = [], []\n\n result = self.db.execute_return(\n bulk_load_select.related_by_bulk_load_outline_id_dataset_id,\n (id_bulk, self.current_dataset),\n )\n for (\n id_existing,\n id_bulk,\n existing_use,\n existing_name,\n bulk_load_use,\n bulk_load_name,\n ) in result.fetchall():\n ids_existing.append(id_existing)\n ids_bulk.append(id_bulk)\n return list(set(ids_existing)), list(set(ids_bulk))\n\n def find_related_bulk_load_outlines(self, id_existing):\n ids_existing, ids_bulk = [], []\n\n result = self.db.execute_return(\n bulk_load_select.related_by_existing_outline_id_dataset_id,\n (id_existing, self.current_dataset),\n )\n for (id_existing, id_bulk) in result.fetchall():\n ids_existing.append(id_existing)\n ids_bulk.append(id_bulk)\n return list(set(ids_existing)), list(set(ids_bulk))\n\n def insert_into_table(self, tbl, ids):\n rows = []\n for (id_existing, id_bulk) in ids:\n row_tbl = tbl.rowCount()\n tbl.setRowCount(row_tbl + 1)\n if id_existing:\n tbl.setItem(row_tbl, 0, QTableWidgetItem(\"%s\" % id_existing))\n if id_bulk:\n tbl.setItem(row_tbl, 1, QTableWidgetItem(\"%s\" % id_bulk))\n rows.append(row_tbl)\n return rows\n\n def connect_to_error_msg(self):\n self.tool.multi_selection_changed.disconnect(self.multi_selection_changed)\n self.tool.multi_selection_changed.connect(self.unfinished_error_msg)\n self.tbl_relationship.itemSelectionChanged.disconnect(\n self.tbl_relationship_item_selection_changed\n )\n self.tbl_relationship.itemSelectionChanged.connect(self.unfinished_error_msg)\n self.reset_buttons()\n self.btn_maptool.setEnabled(False)\n\n def disconnect_to_error_msg(self):\n self.tool.multi_selection_changed.disconnect(self.unfinished_error_msg)\n self.tool.multi_selection_changed.connect(self.multi_selection_changed)\n self.tbl_relationship.itemSelectionChanged.disconnect(self.unfinished_error_msg)\n self.tbl_relationship.itemSelectionChanged.connect(\n self.tbl_relationship_item_selection_changed\n )\n self.reset_buttons()\n self.btn_maptool.setEnabled(True)\n\n def has_no_selection_in_table(self, tbl):\n return not tbl.selectionModel().selectedRows()\n\n def insert_into_lyr_removed_in_edit(self, ids_existing):\n for id_existing in ids_existing:\n filter_ = self.lyr_removed_existing_in_edit.subsetString()\n 
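# extend the subset string so the outline appears in the removed-in-edit layer\n 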
self.lyr_removed_existing_in_edit.setSubsetString(\n filter_ + \" or building_outline_id = %s\" % id_existing\n )\n\n def insert_into_lyr_added_in_edit(self, ids_bulk):\n for id_bulk in ids_bulk:\n filter_ = self.lyr_added_bulk_load_in_edit.subsetString()\n self.lyr_added_bulk_load_in_edit.setSubsetString(\n filter_ + \" or bulk_load_outline_id = %s\" % id_bulk\n )\n\n def delete_original_relationship_in_existing(self, id_existing):\n \"\"\"\n Remove features in the view layer\n \"\"\"\n if not self.lyr_removed_existing.subsetString():\n self.lyr_removed_existing.setSubsetString(\n '\"building_outline_id\" != %s' % id_existing\n )\n else:\n self.lyr_removed_existing.setSubsetString(\n self.lyr_removed_existing.subsetString()\n + ' and \"building_outline_id\" != %s' % id_existing\n )\n\n if not self.lyr_matched_existing.subsetString():\n self.lyr_matched_existing.setSubsetString(\n '\"building_outline_id\" != %s' % id_existing\n )\n else:\n self.lyr_matched_existing.setSubsetString(\n self.lyr_matched_existing.subsetString()\n + ' and \"building_outline_id\" != %s' % id_existing\n )\n\n if not self.lyr_related_existing.subsetString():\n self.lyr_related_existing.setSubsetString(\n '\"building_outline_id\" != %s' % id_existing\n )\n else:\n self.lyr_related_existing.setSubsetString(\n self.lyr_related_existing.subsetString()\n + ' and \"building_outline_id\" != %s' % id_existing\n )\n\n def delete_original_relationship_in_bulk_load(self, id_bulk):\n \"\"\"\n Remove features in the view layer\n \"\"\"\n if not self.lyr_added_bulk_load.subsetString():\n self.lyr_added_bulk_load.setSubsetString(\n '\"bulk_load_outline_id\" != %s' % id_bulk\n )\n else:\n self.lyr_added_bulk_load.setSubsetString(\n self.lyr_added_bulk_load.subsetString()\n + ' and \"bulk_load_outline_id\" != %s' % id_bulk\n )\n\n if not self.lyr_matched_bulk_load.subsetString():\n self.lyr_matched_bulk_load.setSubsetString(\n '\"bulk_load_outline_id\" != %s' % id_bulk\n )\n else:\n self.lyr_matched_bulk_load.setSubsetString(\n self.lyr_matched_bulk_load.subsetString()\n + ' and \"bulk_load_outline_id\" != %s' % id_bulk\n )\n\n if not self.lyr_related_bulk_load.subsetString():\n self.lyr_related_bulk_load.setSubsetString(\n '\"bulk_load_outline_id\" != %s' % id_bulk\n )\n else:\n self.lyr_related_bulk_load.setSubsetString(\n self.lyr_related_bulk_load.subsetString()\n + ' and \"bulk_load_outline_id\" != %s' % id_bulk\n )\n\n def reset_buttons(self):\n self.btn_unlink.setEnabled(False)\n self.btn_matched.setEnabled(False)\n self.btn_related.setEnabled(False)\n self.btn_delete.setEnabled(False)\n self.btn_save.setEnabled(False)\n self.btn_maptool.setEnabled(True)\n self.btn_copy_from_existing.setEnabled(False)\n self.btn_set_attributes.setEnabled(False)\n self.btn_delete_attributes.setEnabled(False)\n self.cbox_use.setEnabled(False)\n self.ledit_name.setEnabled(False)\n\n def qa_button_set_enable(self, boolean):\n self.btn_qa_okay.setEnabled(boolean)\n self.btn_qa_pending.setEnabled(boolean)\n self.btn_qa_refer2supplier.setEnabled(boolean)\n self.btn_qa_not_checked.setEnabled(boolean)\n\n def insert_into_list(self, lst, ids):\n for fid in ids:\n lst.addItem(QListWidgetItem(\"%s\" % fid))\n\n def get_ids_from_lst(self, lst):\n feat_ids = []\n for row in range(lst.count()):\n feat_ids.append(int(lst.item(row).text()))\n return feat_ids\n\n @staticmethod\n def get_lst_content(lst):\n \"\"\"\n Returns a list of tuples of the row_id and the row content evaluated using `literal_eval`.\n \"\"\"\n return [(n, 
literal_eval(lst.item(n).text())) for n in range(lst.count())]\n\n def disable_listwidget(self, lst):\n for row in range(lst.count()):\n item = lst.item(row)\n item.setFlags(Qt.ItemIsEnabled)\n\n def update_list_item_color(self, existing_color, bulk_color):\n for i in range(self.lst_existing.count()):\n self.lst_existing.item(i).setForeground(QColor(existing_color))\n for i in range(self.lst_bulk.count()):\n self.lst_bulk.item(i).setForeground(QColor(bulk_color))\n\n def update_attr_list_item_color(self, existing_color, bulk_color):\n for i in range(self.lst_existing_attrs.count()):\n self.lst_existing_attrs.item(i).setForeground(QColor(existing_color))\n for i in range(self.lst_bulk_attrs.count()):\n self.lst_bulk_attrs.item(i).setForeground(QColor(bulk_color))\n\n def delete_from_lyr_removed_in_edit(self, id_existing):\n filter_ = self.lyr_removed_existing_in_edit.subsetString()\n self.lyr_removed_existing_in_edit.setSubsetString(\n \"(\" + filter_ + ') and \"building_outline_id\" != %s' % id_existing\n )\n\n def delete_from_lyr_added_in_edit(self, id_bulk):\n filter_ = self.lyr_added_bulk_load_in_edit.subsetString()\n self.lyr_added_bulk_load_in_edit.setSubsetString(\n \"(\" + filter_ + ') and \"bulk_load_outline_id\" != %s' % id_bulk\n )\n\n def insert_into_lyr_matched_existing_in_edit(self, id_existing):\n self.lyr_matched_existing_in_edit.setSubsetString(\n '\"building_outline_id\" = %s' % id_existing\n )\n\n def insert_into_lyr_matched_bulk_load_in_edit(self, id_bulk):\n self.lyr_matched_bulk_load_in_edit.setSubsetString(\n '\"bulk_load_outline_id\" = %s' % id_bulk\n )\n\n def insert_into_lyr_related_existing_in_edit(self, id_existing):\n filter_ = self.lyr_related_existing_in_edit.subsetString()\n self.lyr_related_existing_in_edit.setSubsetString(\n filter_ + ' or \"building_outline_id\" = %s' % id_existing\n )\n\n def insert_into_lyr_related_bulk_load_in_edit(self, id_bulk):\n filter_ = self.lyr_related_bulk_load_in_edit.subsetString()\n self.lyr_related_bulk_load_in_edit.setSubsetString(\n filter_ + ' or \"bulk_load_outline_id\" = %s' % id_bulk\n )\n\n def delete_original_relationships(self):\n sql_delete_related_existing = (\n \"SELECT buildings_bulk_load.related_delete_existing_outlines(%s);\"\n )\n sql_delete_matched_existing = (\n \"SELECT buildings_bulk_load.matched_delete_existing_outlines(%s);\"\n )\n sql_delete_removed = (\n \"SELECT buildings_bulk_load.removed_delete_existing_outline(%s);\"\n )\n sql_delete_added = (\n \"SELECT buildings_bulk_load.added_delete_bulk_load_outlines(%s);\"\n )\n\n for row in range(self.lst_existing.count()):\n item = self.lst_existing.item(row)\n id_existing = int(item.text())\n self.db.execute_no_commit(sql_delete_removed, (id_existing,))\n self.db.execute_no_commit(sql_delete_matched_existing, (id_existing,))\n self.db.execute_no_commit(sql_delete_related_existing, (id_existing,))\n\n for row in range(self.lst_bulk.count()):\n item = self.lst_bulk.item(row)\n id_bulk = int(item.text())\n\n self.db.execute_no_commit(sql_delete_added, (id_bulk,))\n\n def insert_new_added_outlines(self):\n # added\n sql_insert_added = (\n \"SELECT buildings_bulk_load.added_insert_bulk_load_outlines(%s, %s);\"\n )\n for feat in self.lyr_added_bulk_load_in_edit.getFeatures():\n id_bulk = feat[\"bulk_load_outline_id\"]\n self.db.execute_no_commit(sql_insert_added, (id_bulk, 2))\n\n def insert_new_removed_outlines(self):\n # removed\n sql_insert_removed = (\n \"SELECT buildings_bulk_load.removed_insert_building_outlines(%s);\"\n )\n for feat in 
self.lyr_removed_existing_in_edit.getFeatures():\n id_existing = feat[\"building_outline_id\"]\n self.db.execute_no_commit(sql_insert_removed, (id_existing,))\n\n def insert_new_matched_outlines(self):\n # matched\n sql_insert_matched = (\n \"SELECT buildings_bulk_load.matched_insert_building_outlines(%s, %s);\"\n )\n for feat1 in self.lyr_matched_bulk_load_in_edit.getFeatures():\n id_bulk = feat1[\"bulk_load_outline_id\"]\n for feat2 in self.lyr_matched_existing_in_edit.getFeatures():\n id_existing = feat2[\"building_outline_id\"]\n self.db.execute_no_commit(sql_insert_matched, (id_bulk, id_existing))\n\n def insert_new_related_outlines(self):\n # related\n related_outlines = [\n feat for feat in self.lyr_related_bulk_load_in_edit.getFeatures()\n ]\n if related_outlines:\n sql_insert_related_group = (\n \"SELECT buildings_bulk_load.related_group_insert();\"\n )\n result = self.db.execute_no_commit(sql_insert_related_group)\n new_group_id = result.fetchone()[0]\n sql_insert_related = (\n \"SELECT buildings_bulk_load.related_insert_building_outlines(%s, %s, %s);\"\n )\n for feat1 in self.lyr_related_bulk_load_in_edit.getFeatures():\n id_bulk = feat1[\"bulk_load_outline_id\"]\n for feat2 in self.lyr_related_existing_in_edit.getFeatures():\n id_existing = feat2[\"building_outline_id\"]\n self.db.execute_no_commit(\n sql_insert_related, (new_group_id, id_bulk, id_existing)\n )\n\n def update_bulkload_attributes(self):\n sql_update_attrs = \"\"\"\n UPDATE buildings_bulk_load.bulk_load_outlines\n SET bulk_load_use_id = %s, bulk_load_name = %s\n WHERE bulk_load_outline_id = %s;\n \"\"\"\n for row_id, (id_, use, name) in self.get_lst_content(self.lst_bulk_attrs):\n use_id = self.valid_building_use_ids[use]\n if name in {\"\", \"None\"}:\n name = None\n self.db.execute_no_commit(sql_update_attrs, (use_id, name, id_))\n\n def disable_tbl_editing(self, tbl):\n \"\"\"Disable editing so item cannot be changed in the table\"\"\"\n for row in range(tbl.rowCount()):\n tbl.showRow(row)\n for col in range(tbl.columnCount()):\n if tbl.item(row, col):\n tbl.item(row, col).setFlags(Qt.ItemIsSelectable | Qt.ItemIsEnabled)\n\n def refresh_tbl_relationship(self):\n \"\"\"Refresh tbl_relationship by switching cmb_relationship\"\"\"\n index = self.cmb_relationship.currentIndex()\n self.cmb_relationship.setCurrentIndex(0)\n self.cmb_relationship.setCurrentIndex(index)\n\n def populate_cmb_relationship(self):\n \"\"\"Populates cmb_relationship\"\"\"\n self.cmb_relationship.clear()\n item_list = [\n \"Removed Outlines\",\n \"Matched Outlines\",\n \"Related Outlines\",\n \"Added Outlines\",\n ]\n self.cmb_relationship.addItems([\"\"] + item_list)\n\n def init_tbl_relationship(self, header_items):\n \"\"\"Initiates tbl_relationship\"\"\"\n tbl = self.tbl_relationship\n tbl.setRowCount(0)\n tbl.setColumnCount(len(header_items))\n\n for i, header_item in enumerate(header_items):\n tbl.setHorizontalHeaderItem(i, QTableWidgetItem(header_item))\n\n tbl.horizontalHeader().setSectionResizeMode(QHeaderView.Stretch)\n tbl.verticalHeader().setVisible(False)\n\n tbl.setSelectionBehavior(QAbstractItemView.SelectRows)\n tbl.setSelectionMode(QAbstractItemView.SingleSelection)\n\n tbl.setShowGrid(True)\n\n def populate_tbl_related(self):\n \"\"\"Populates tbl_relationship when cmb_relationship switches to related\"\"\"\n tbl = self.tbl_relationship\n result = self.db.execute_return(\n bulk_load_select.related_by_dataset_id, (self.current_dataset,)\n )\n for (\n id_group,\n id_existing,\n id_bulk,\n qa_status,\n exist_use,\n 
exist_name,\n bulk_use,\n bulk_name,\n ) in result.fetchall():\n row_tbl = tbl.rowCount()\n tbl.setRowCount(row_tbl + 1)\n tbl.setItem(row_tbl, 0, QTableWidgetItem(\"%s\" % id_group))\n tbl.setItem(row_tbl, 1, QTableWidgetItem(\"%s\" % id_existing))\n tbl.setItem(row_tbl, 2, QTableWidgetItem(\"%s\" % id_bulk))\n tbl.setItem(row_tbl, 3, QTableWidgetItem(\"%s\" % qa_status))\n tbl.setItem(row_tbl, 4, QTableWidgetItem(\"%s\" % exist_use))\n tbl.setItem(row_tbl, 5, QTableWidgetItem(\"%s\" % exist_name))\n tbl.setItem(row_tbl, 6, QTableWidgetItem(\"%s\" % bulk_use))\n tbl.setItem(row_tbl, 7, QTableWidgetItem(\"%s\" % bulk_name))\n\n def populate_tbl_matched(self):\n \"\"\"Populates tbl_relationship when cmb_relationship switches to matched\"\"\"\n tbl = self.tbl_relationship\n result = self.db.execute_return(\n bulk_load_select.matched_by_dataset_id, (self.current_dataset,)\n )\n for (\n id_existing,\n id_bulk,\n qa_status,\n exist_use,\n exist_name,\n bulk_use,\n bulk_name,\n ) in result.fetchall():\n row_tbl = tbl.rowCount()\n tbl.setRowCount(row_tbl + 1)\n tbl.setItem(row_tbl, 0, QTableWidgetItem(\"%s\" % id_existing))\n tbl.setItem(row_tbl, 1, QTableWidgetItem(\"%s\" % id_bulk))\n tbl.setItem(row_tbl, 2, QTableWidgetItem(\"%s\" % qa_status))\n tbl.setItem(row_tbl, 3, QTableWidgetItem(\"%s\" % exist_use))\n tbl.setItem(row_tbl, 4, QTableWidgetItem(\"%s\" % exist_name))\n tbl.setItem(row_tbl, 5, QTableWidgetItem(\"%s\" % bulk_use))\n tbl.setItem(row_tbl, 6, QTableWidgetItem(\"%s\" % bulk_name))\n\n def populate_tbl_removed(self):\n \"\"\"Populates tbl_relationship when cmb_relationship switches to removed\"\"\"\n tbl = self.tbl_relationship\n result = self.db.execute_return(\n bulk_load_select.removed_by_dataset_id, (self.current_dataset,)\n )\n for (id_existing, qa_status, exist_use, exist_name) in result.fetchall():\n row_tbl = tbl.rowCount()\n tbl.setRowCount(row_tbl + 1)\n tbl.setItem(row_tbl, 0, QTableWidgetItem(\"%s\" % id_existing))\n tbl.setItem(row_tbl, 1, QTableWidgetItem(\"%s\" % qa_status))\n tbl.setItem(row_tbl, 2, QTableWidgetItem(\"%s\" % exist_use))\n tbl.setItem(row_tbl, 3, QTableWidgetItem(\"%s\" % exist_name))\n\n def populate_tbl_added(self):\n \"\"\"Populates tbl_relationship when cmb_relationship switches to added\"\"\"\n tbl = self.tbl_relationship\n result = self.db.execute_return(\n bulk_load_select.added_by_dataset_id, (self.current_dataset,)\n )\n for (id_bulk_load, bulk_use, bulk_name) in result.fetchall():\n row_tbl = tbl.rowCount()\n tbl.setRowCount(row_tbl + 1)\n tbl.setItem(row_tbl, 0, QTableWidgetItem(\"%s\" % id_bulk_load))\n tbl.setItem(row_tbl, 1, QTableWidgetItem(\"%s\" % bulk_use))\n tbl.setItem(row_tbl, 2, QTableWidgetItem(\"%s\" % bulk_name))\n\n def is_empty_tbl_relationship(self, relationship):\n if self.tbl_relationship.rowCount() == 0:\n self.message_bar_qa.pushMessage(\n \"%s are not available in the current dataset.\" % relationship\n )\n return True\n return False\n\n def get_qa_status_id(self, qa_status):\n \"\"\"Returns qa_status_id according to the sender button\"\"\"\n if qa_status == \"Okay\":\n qa_status_id = 2\n elif qa_status == \"Pending\":\n qa_status_id = 3\n elif qa_status == \"Refer to Supplier\":\n qa_status_id = 4\n elif qa_status == \"Not Checked\":\n qa_status_id = 1\n elif qa_status == \"Not Removed\":\n qa_status_id = 5\n else:\n qa_status_id = None\n return qa_status_id\n\n def zoom_to_feature(self):\n\n extent = None\n for lyr in [self.lyr_existing, self.lyr_bulk_load]:\n selected_feat = [feat for feat in 
\n    def zoom_to_feature(self):\n        \"\"\"Zoom the map canvas to the features selected in either layer\"\"\"\n        extent = None\n        for lyr in [self.lyr_existing, self.lyr_bulk_load]:\n            selected_feat = list(lyr.selectedFeatures())\n            if selected_feat:\n                if not extent:\n                    extent = lyr.boundingBoxOfSelected()\n                else:\n                    extent.combineExtentWith(lyr.boundingBoxOfSelected())\n        if extent:\n            iface.mapCanvas().setExtent(extent)\n            iface.mapCanvas().zoomScale(300.0)\n\n    def scroll_to_next(self, row, qa_column, selected_rows):\n        \"\"\"Select and scroll to the given row if its QA status is Not Checked\"\"\"\n        # NOTE: selected_rows is currently unused\n        item = self.tbl_relationship.item(row, qa_column)\n        if item.text() == \"Not Checked\":\n            self.tbl_relationship.selectRow(row)\n            self.tbl_relationship.scrollToItem(item)\n            return True\n        return False\n\n    def update_qa_status_in_related(self, id_existing, id_bulk, qa_status_id):\n        \"\"\"Updates qa_status_id in related table\"\"\"\n        sql_update_related = (\n            \"SELECT buildings_bulk_load.related_update_qa_status_id(%s, %s, %s);\"\n        )\n        self.db.execute_no_commit(\n            sql_update_related, (qa_status_id, id_existing, id_bulk)\n        )\n\n    def update_qa_status_in_matched(self, id_existing, id_bulk, qa_status_id):\n        \"\"\"Updates qa_status_id in matched table\"\"\"\n        sql_update_matched = (\n            \"SELECT buildings_bulk_load.matched_update_qa_status_id(%s, %s, %s);\"\n        )\n        self.db.execute_no_commit(\n            sql_update_matched, (qa_status_id, id_existing, id_bulk)\n        )\n\n    def update_qa_status_in_removed(self, id_existing, qa_status_id):\n        \"\"\"Updates qa_status_id in removed table\"\"\"\n        sql_update_removed = (\n            \"SELECT buildings_bulk_load.removed_update_qa_status_id(%s, %s);\"\n        )\n        self.db.execute_no_commit(sql_update_removed, (qa_status_id, id_existing))\n\n    def select_row_in_tbl_matched(self, id_existing, id_bulk):\n        tbl = self.tbl_relationship\n        index = self.cmb_relationship.findText(\"Matched Outlines\")\n        if self.cmb_relationship.currentIndex() != index:\n            self.cmb_relationship.setCurrentIndex(index)\n        for row in range(self.tbl_relationship.rowCount()):\n            if (\n                int(tbl.item(row, 0).text()) == id_existing\n                and int(tbl.item(row, 1).text()) == id_bulk\n            ):\n                tbl.selectRow(row)\n                tbl.scrollToItem(tbl.item(row, 0))\n\n    def select_row_in_tbl_related(self, id_existing, id_bulk):\n        tbl = self.tbl_relationship\n        index = self.cmb_relationship.findText(\"Related Outlines\")\n        if self.cmb_relationship.currentIndex() != index:\n            self.cmb_relationship.setCurrentIndex(index)\n        self.tbl_relationship.setSelectionMode(QAbstractItemView.MultiSelection)\n        for row in range(self.tbl_relationship.rowCount()):\n            if (\n                int(tbl.item(row, 1).text()) == id_existing\n                and int(tbl.item(row, 2).text()) == id_bulk\n            ):\n                tbl.selectRow(row)\n                tbl.scrollToItem(tbl.item(row, 0))\n\n    def select_row_in_tbl_removed(self, id_existing):\n        tbl = self.tbl_relationship\n        index = self.cmb_relationship.findText(\"Removed Outlines\")\n        if self.cmb_relationship.currentIndex() != index:\n            self.cmb_relationship.setCurrentIndex(index)\n        self.tbl_relationship.setSelectionMode(QAbstractItemView.MultiSelection)\n        for row in range(self.tbl_relationship.rowCount()):\n            if int(tbl.item(row, 0).text()) == id_existing:\n                tbl.selectRow(row)\n                tbl.scrollToItem(tbl.item(row, 0))\n\n    def select_row_in_tbl_added(self, id_bulk):\n        tbl = self.tbl_relationship\n        index = self.cmb_relationship.findText(\"Added Outlines\")\n        if self.cmb_relationship.currentIndex() != index:\n            self.cmb_relationship.setCurrentIndex(index)\n        self.tbl_relationship.setSelectionMode(QAbstractItemView.MultiSelection)\n        for row in range(self.tbl_relationship.rowCount()):\n            if int(tbl.item(row, 0).text()) == id_bulk:\n                tbl.selectRow(row)\n                tbl.scrollToItem(tbl.item(row, 0))\n
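\n    # The four select_row_in_tbl_* methods above differ only in the combo box\n    # label, the key columns compared, and whether multi-selection is enabled.\n    # A hedged sketch of one parameterised helper they could delegate to (the\n    # helper name is hypothetical, not part of this plugin):\n    #\n    #     def _select_rows(self, relationship, key_columns, keys, multi=True):\n    #         tbl = self.tbl_relationship\n    #         index = self.cmb_relationship.findText(relationship)\n    #         if self.cmb_relationship.currentIndex() != index:\n    #             self.cmb_relationship.setCurrentIndex(index)\n    #         if multi:\n    #             tbl.setSelectionMode(QAbstractItemView.MultiSelection)\n    #         for row in range(tbl.rowCount()):\n    #             if all(\n    #                 int(tbl.item(row, col).text()) == key\n    #                 for col, key in zip(key_columns, keys)\n    #             ):\n    #                 tbl.selectRow(row)\n    #                 tbl.scrollToItem(tbl.item(row, 0))\n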
\n    def canvas_add_outline(self):\n        self.lyr_existing.removeSelection()\n        self.lyr_bulk_load.removeSelection()\n\n        self.lst_existing.clear()\n        self.lst_bulk.clear()\n\n        self.lst_existing_attrs.clear()\n        self.lst_bulk_attrs.clear()\n\n        self.tbl_relationship.clearSelection()\n\n        self.edit_dialog.add_outline()\n        self.edit_dialog.show()\n        self.change_instance = self.edit_dialog.get_change_instance()\n\n        self.circle_tool = None\n        self.polyline = None\n\n        # set up the circle drawing button on the building toolbar\n        image_dir = os.path.join(__location__, \"..\", \"icons\")\n        icon_path = os.path.join(image_dir, \"circle.png\")\n        icon = QIcon()\n        icon.addFile(icon_path, QSize(8, 8))\n        self.circle_action = QAction(icon, \"Draw Circle\", iface.building_toolbar)\n        iface.registerMainWindowAction(self.circle_action, \"Ctrl+0\")\n        self.circle_action.triggered.connect(self.circle_tool_clicked)\n        self.circle_action.setCheckable(True)\n        iface.building_toolbar.addAction(self.circle_action)\n\n    def canvas_edit_geometry(self):\n        \"\"\"\n        When edit geometry radio button toggled\n        \"\"\"\n        self.lyr_existing.removeSelection()\n\n        self.lst_existing.clear()\n        self.lst_bulk.clear()\n\n        self.edit_dialog.edit_geometry()\n        self.edit_dialog.show()\n        self.change_instance = self.edit_dialog.get_change_instance()\n\n    def canvas_edit_attribute(self):\n        \"\"\"\n        When edit attribute radio button toggled\n        \"\"\"\n        self.lyr_existing.removeSelection()\n\n        self.lst_existing.clear()\n        self.lst_bulk.clear()\n\n        self.edit_dialog.show()\n        self.edit_dialog.edit_attribute()\n        self.change_instance = self.edit_dialog.get_change_instance()\n\n    def circle_tool_clicked(self):\n        circle_tool.setup_circle(self)\n\n    def edit_cancel_clicked(self):\n        if len(QgsProject.instance().mapLayersByName(\"bulk_load_outlines\")) > 0:\n            if isinstance(self.change_instance, bulk_load_changes.EditAttribute):\n                try:\n                    self.lyr_bulk_load.selectionChanged.disconnect(\n                        self.change_instance.selection_changed\n                    )\n                except TypeError:\n                    pass\n            elif isinstance(self.change_instance, bulk_load_changes.EditGeometry):\n                try:\n                    self.lyr_bulk_load.geometryChanged.disconnect(\n                        self.change_instance.geometry_changed\n                    )\n                except TypeError:\n                    pass\n            elif isinstance(self.change_instance, bulk_load_changes.AddBulkLoad):\n                try:\n                    self.lyr_bulk_load.featureAdded.disconnect()\n                except TypeError:\n                    pass\n                try:\n                    self.lyr_bulk_load.featureDeleted.disconnect()\n                except TypeError:\n                    pass\n                try:\n                    self.lyr_bulk_load.geometryChanged.disconnect()\n                except TypeError:\n                    pass\n                if self.polyline:\n                    self.polyline.reset()\n                if isinstance(self.circle_tool, PointTool):\n                    self.circle_tool.canvas_clicked.disconnect()\n                    self.circle_tool.mouse_moved.disconnect()\n                    self.circle_tool.deactivate()\n                iface.actionPan().trigger()\n\n        iface.actionCancelEdits().trigger()\n\n        QgsProject.instance().layerWillBeRemoved.disconnect(self.layers_removed)\n\n        QgsProject.instance().layerWillBeRemoved.connect(self.layers_removed)\n\n        self.toolbar_setup()\n\n        for val in [\n            str(layer.id())\n            for layer in QgsProject.instance().layerTreeRoot().layerOrder()\n        ]:\n            if \"existing_subset_extracts\" in val:\n                self.lyr_existing.removeSelection()\n            if \"bulk_load_outlines\" in val:\n                self.lyr_bulk_load.removeSelection()\n\n        self.tbl_relationship.clearSelection()\n\n        self.btn_maptool.click()\n\n        self.change_instance = None\n
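\n    # edit_cancel_clicked repeats a disconnect-or-ignore idiom: PyQt raises\n    # TypeError when .disconnect() is called on a signal with no connections.\n    # A hedged sketch of a small helper that would capture the pattern (the\n    # helper name is hypothetical, not part of this plugin):\n    #\n    #     @staticmethod\n    #     def _safe_disconnect(signal, slot=None):\n    #         try:\n    #             if slot is None:\n    #                 signal.disconnect()\n    #             else:\n    #                 signal.disconnect(slot)\n    #         except TypeError:\n    #             pass  # nothing was connected\n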
\n    def reload_bulk_load_layer(self):\n        \"\"\"To ensure QGIS has the most up to date ID for the newly split feature, see #349\"\"\"\n        layer_tree_layer = QgsProject.instance().layerTreeRoot().findLayer(self.lyr_bulk_load.id())\n        layer_tree_model = iface.layerTreeView().layerTreeModel()\n        legend_nodes = layer_tree_model.layerLegendNodes(layer_tree_layer)\n        # Toggling each legend node unchecked and checked again forces QGIS to\n        # redraw the layer, so the split feature is shown with its real id\n        legend_node_null = [ln for ln in legend_nodes if not ln.data(Qt.DisplayRole)]\n        legend_node_null[0].setData(Qt.Unchecked, Qt.CheckStateRole)\n        legend_node_null[0].setData(Qt.Checked, Qt.CheckStateRole)\n        legend_node_added = [\n            ln for ln in legend_nodes if ln.data(Qt.DisplayRole) == \"Added In Edit\"\n        ]\n        legend_node_added[0].setData(Qt.Unchecked, Qt.CheckStateRole)\n        legend_node_added[0].setData(Qt.Checked, Qt.CheckStateRole)\n\n    @property\n    def valid_building_use_ids(self):\n        \"\"\"self.valid_building_uses flipped to map use strings to use_id ints\"\"\"\n        return {use: use_id for use_id, use in self.valid_building_uses.items()}\n", "repo_name": "linz/nz-buildings", "sub_path": "buildings/gui/alter_building_relationships.py", "file_name": "alter_building_relationships.py", "file_ext": "py", "file_size_in_byte": 94702, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 5, "dataset": "github-code", "pt": "73", "api": [{"api_name": "os.path.path.realpath", "line_number": 36, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 36, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 36, "usage_type": "name"}, {"api_name": "os.path.path.join", "line_number": 36, "usage_type": "call"}, {"api_name": "os.path.getcwd", "line_number": 36, "usage_type": "call"}, {"api_name": "os.path.path.dirname", "line_number": 36, "usage_type": "call"}, {"api_name": "qgis.PyQt.uic.loadUiType", "line_number": 37, "usage_type": "call"}, {"api_name": "qgis.PyQt.uic", "line_number": 37, "usage_type": "name"}, {"api_name": "os.path.path.join", "line_number": 38, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 38, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 38, "usage_type": "name"}, {"api_name": "os.path.path.dirname", "line_number": 38, "usage_type": "call"}, {"api_name": "qgis.PyQt.QtWidgets.QFrame", "line_number": 42, "usage_type": "name"}, {"api_name": "buildings.utilities.database", "line_number": 77, "usage_type": "name"}, {"api_name": "buildings.utilities.layers.LayerRegistry", "line_number": 91, "usage_type": "call"}, {"api_name": "buildings.gui.edit_dialog.EditDialog", "line_number": 102, "usage_type": "call"}, {"api_name": "qgis.gui.QgsMessageBar", "line_number": 109, "usage_type": "call"}, {"api_name": "qgis.gui.QgsMessageBar", "line_number": 111, "usage_type": "call"}, {"api_name": "qgis.PyQt.QtGui.QIcon", "line_number": 115, "usage_type": "call"}, {"api_name": "os.path.path.join", "line_number": 115, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 115, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 115, "usage_type": "name"}, {"api_name": "qgis.PyQt.QtGui.QIcon", "line_number": 118, "usage_type": "call"}, {"api_name": "os.path.path.join", "line_number": 118, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 118, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 118, "usage_type": "name"}, {"api_name": "qgis.PyQt.QtGui.QIcon", "line_number": 121, "usage_type": "call"}, {"api_name": "os.path.path.join", "line_number": 122, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 122, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 122, "usage_type": "name"}, {"api_name": "qgis.utils.iface.setActiveLayer", "line_number": 142, "usage_type": "call"}, {"api_name": "qgis.utils.iface", "line_number": 142, "usage_type": "name"}, {"api_name": "qgis.utils.iface.building_toolbar.actions", "line_number": 147, "usage_type": 
"call"}, {"api_name": "qgis.utils.iface.building_toolbar", "line_number": 147, "usage_type": "attribute"}, {"api_name": "qgis.utils.iface", "line_number": 147, "usage_type": "name"}, {"api_name": "os.path.path.join", "line_number": 149, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 149, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 149, "usage_type": "name"}, {"api_name": "os.path.path.join", "line_number": 150, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 150, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 150, "usage_type": "name"}, {"api_name": "qgis.PyQt.QtGui.QIcon", "line_number": 151, "usage_type": "call"}, {"api_name": "qgis.PyQt.QtCore.QSize", "line_number": 152, "usage_type": "call"}, {"api_name": "qgis.PyQt.QtWidgets.QAction", "line_number": 153, "usage_type": "call"}, {"api_name": "qgis.utils.iface.building_toolbar", "line_number": 153, "usage_type": "attribute"}, {"api_name": "qgis.utils.iface", "line_number": 153, "usage_type": "name"}, {"api_name": "qgis.utils.iface.registerMainWindowAction", "line_number": 154, "usage_type": "call"}, {"api_name": "qgis.utils.iface", "line_number": 154, "usage_type": "name"}, {"api_name": "qgis.utils.iface.building_toolbar.addAction", "line_number": 156, "usage_type": "call"}, {"api_name": "qgis.utils.iface.building_toolbar", "line_number": 156, "usage_type": "attribute"}, {"api_name": "qgis.utils.iface", "line_number": 156, "usage_type": "name"}, {"api_name": "qgis.utils.iface.building_toolbar.actions", "line_number": 159, "usage_type": "call"}, {"api_name": "qgis.utils.iface.building_toolbar", "line_number": 159, "usage_type": "attribute"}, {"api_name": "qgis.utils.iface", "line_number": 159, "usage_type": "name"}, {"api_name": "os.path.path.join", "line_number": 161, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 161, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 161, "usage_type": "name"}, {"api_name": "os.path.path.join", "line_number": 162, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 162, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 162, "usage_type": "name"}, {"api_name": "qgis.PyQt.QtGui.QIcon", "line_number": 163, "usage_type": "call"}, {"api_name": "qgis.PyQt.QtCore.QSize", "line_number": 164, "usage_type": "call"}, {"api_name": "qgis.PyQt.QtWidgets.QAction", "line_number": 165, "usage_type": "call"}, {"api_name": "qgis.utils.iface.building_toolbar", "line_number": 166, "usage_type": "attribute"}, {"api_name": "qgis.utils.iface", "line_number": 166, "usage_type": "name"}, {"api_name": "qgis.utils.iface.registerMainWindowAction", "line_number": 168, "usage_type": "call"}, {"api_name": "qgis.utils.iface", "line_number": 168, "usage_type": "name"}, {"api_name": "qgis.utils.iface.building_toolbar.addAction", "line_number": 170, "usage_type": "call"}, {"api_name": "qgis.utils.iface.building_toolbar", "line_number": 170, "usage_type": "attribute"}, {"api_name": "qgis.utils.iface", "line_number": 170, "usage_type": "name"}, {"api_name": "qgis.utils.iface.building_toolbar.actions", "line_number": 173, "usage_type": "call"}, {"api_name": "qgis.utils.iface.building_toolbar", "line_number": 173, "usage_type": "attribute"}, {"api_name": "qgis.utils.iface", "line_number": 173, "usage_type": "name"}, {"api_name": "os.path.path.join", "line_number": 175, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 175, "usage_type": "attribute"}, {"api_name": "os.path", 
"line_number": 175, "usage_type": "name"}, {"api_name": "os.path.path.join", "line_number": 176, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 176, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 176, "usage_type": "name"}, {"api_name": "qgis.PyQt.QtGui.QIcon", "line_number": 177, "usage_type": "call"}, {"api_name": "qgis.PyQt.QtCore.QSize", "line_number": 178, "usage_type": "call"}, {"api_name": "qgis.PyQt.QtWidgets.QAction", "line_number": 179, "usage_type": "call"}, {"api_name": "qgis.utils.iface.building_toolbar", "line_number": 180, "usage_type": "attribute"}, {"api_name": "qgis.utils.iface", "line_number": 180, "usage_type": "name"}, {"api_name": "qgis.utils.iface.registerMainWindowAction", "line_number": 182, "usage_type": "call"}, {"api_name": "qgis.utils.iface", "line_number": 182, "usage_type": "name"}, {"api_name": "qgis.utils.iface.building_toolbar.addAction", "line_number": 184, "usage_type": "call"}, {"api_name": "qgis.utils.iface.building_toolbar", "line_number": 184, "usage_type": "attribute"}, {"api_name": "qgis.utils.iface", "line_number": 184, "usage_type": "name"}, {"api_name": "qgis.utils.iface.building_toolbar.show", "line_number": 186, "usage_type": "call"}, {"api_name": "qgis.utils.iface.building_toolbar", "line_number": 186, "usage_type": "attribute"}, {"api_name": "qgis.utils.iface", "line_number": 186, "usage_type": "name"}, {"api_name": "functools.partial", "line_number": 193, "usage_type": "call"}, {"api_name": "functools.partial", "line_number": 196, "usage_type": "call"}, {"api_name": "functools.partial", "line_number": 199, "usage_type": "call"}, {"api_name": "functools.partial", "line_number": 202, "usage_type": "call"}, {"api_name": "functools.partial", "line_number": 205, "usage_type": "call"}, {"api_name": "functools.partial", "line_number": 210, "usage_type": "call"}, {"api_name": "functools.partial", "line_number": 213, "usage_type": "call"}, {"api_name": "functools.partial", "line_number": 216, "usage_type": "call"}, {"api_name": "functools.partial", "line_number": 219, "usage_type": "call"}, {"api_name": "functools.partial", "line_number": 226, "usage_type": "call"}, {"api_name": "qgis.core.QgsProject.instance", "line_number": 242, "usage_type": "call"}, {"api_name": "qgis.core.QgsProject", "line_number": 242, "usage_type": "name"}, {"api_name": "os.path.path.join", "line_number": 247, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 247, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 247, "usage_type": "name"}, {"api_name": "os.path.path.dirname", "line_number": 247, "usage_type": "call"}, {"api_name": "qgis.PyQt.QtWidgets.QMessageBox", "line_number": 425, "usage_type": "call"}, {"api_name": "qgis.PyQt.QtWidgets.QMessageBox.Question", "line_number": 426, "usage_type": "attribute"}, {"api_name": "qgis.PyQt.QtWidgets.QMessageBox", "line_number": 426, "usage_type": "name"}, {"api_name": "qgis.PyQt.QtWidgets.QMessageBox.No", "line_number": 429, "usage_type": "attribute"}, {"api_name": "qgis.PyQt.QtWidgets.QMessageBox", "line_number": 429, "usage_type": "name"}, {"api_name": "qgis.PyQt.QtWidgets.QMessageBox.Yes", "line_number": 429, "usage_type": "attribute"}, {"api_name": "qgis.utils.iface.mapCanvas", "line_number": 437, "usage_type": "call"}, {"api_name": "qgis.utils.iface", "line_number": 437, "usage_type": "name"}, {"api_name": "buildings.utilities.multi_layer_selection.MultiLayerSelection", "line_number": 438, "usage_type": "call"}, {"api_name": 
"qgis.PyQt.QtWidgets.QAbstractItemView.MultiSelection", "line_number": 576, "usage_type": "attribute"}, {"api_name": "qgis.PyQt.QtWidgets.QAbstractItemView", "line_number": 576, "usage_type": "name"}, {"api_name": "qgis.PyQt.QtWidgets.QAbstractItemView.SingleSelection", "line_number": 589, "usage_type": "attribute"}, {"api_name": "qgis.PyQt.QtWidgets.QAbstractItemView", "line_number": 589, "usage_type": "name"}, {"api_name": "qgis.PyQt.QtGui.QColor", "line_number": 619, "usage_type": "call"}, {"api_name": "qgis.PyQt.QtGui.QColor", "line_number": 630, "usage_type": "call"}, {"api_name": "qgis.PyQt.QtGui.QColor", "line_number": 651, "usage_type": "call"}, {"api_name": "qgis.PyQt.QtGui.QColor", "line_number": 672, "usage_type": "call"}, {"api_name": "qgis.PyQt.QtGui.QColor", "line_number": 676, "usage_type": "call"}, {"api_name": "qgis.PyQt.QtGui.QColor", "line_number": 678, "usage_type": "call"}, {"api_name": "qgis.PyQt.QtGui.QColor", "line_number": 680, "usage_type": "call"}, {"api_name": "buildings.gui.error_dialog.ErrorDialog", "line_number": 687, "usage_type": "call"}, {"api_name": "buildings.gui.deletion_reason_dialog.DeletionReason", "line_number": 786, "usage_type": "call"}, {"api_name": "functools.partial", "line_number": 789, "usage_type": "call"}, {"api_name": "qgis.utils.iface.messageBar", "line_number": 807, "usage_type": "call"}, {"api_name": "qgis.utils.iface", "line_number": 807, "usage_type": "name"}, {"api_name": "qgis.utils.Qgis.Info", "line_number": 810, "usage_type": "attribute"}, {"api_name": "qgis.utils.Qgis", "line_number": 810, "usage_type": "name"}, {"api_name": "buildings.gui.error_dialog.ErrorDialog", "line_number": 841, "usage_type": "call"}, {"api_name": "qgis.utils.iface.mapCanvas", "line_number": 915, "usage_type": "call"}, {"api_name": "qgis.utils.iface", "line_number": 915, "usage_type": "name"}, {"api_name": "qgis.utils.iface.mapCanvas", "line_number": 937, "usage_type": "call"}, {"api_name": "qgis.utils.iface", "line_number": 937, "usage_type": "name"}, {"api_name": "qgis.core.QgsProject.instance", "line_number": 960, "usage_type": "call"}, {"api_name": "qgis.core.QgsProject", "line_number": 960, "usage_type": "name"}, {"api_name": "qgis.core.QgsProject.instance", "line_number": 963, "usage_type": "call"}, {"api_name": "qgis.core.QgsProject", "line_number": 963, "usage_type": "name"}, {"api_name": "qgis.utils.iface.building_toolbar.actions", "line_number": 990, "usage_type": "call"}, {"api_name": "qgis.utils.iface.building_toolbar", "line_number": 990, "usage_type": "attribute"}, {"api_name": "qgis.utils.iface", "line_number": 990, "usage_type": "name"}, {"api_name": "qgis.utils.iface.building_toolbar.removeAction", "line_number": 992, "usage_type": "call"}, {"api_name": "qgis.utils.iface.building_toolbar", "line_number": 992, "usage_type": "attribute"}, {"api_name": "qgis.utils.iface", "line_number": 992, "usage_type": "name"}, {"api_name": "qgis.utils.iface.building_toolbar.hide", "line_number": 993, "usage_type": "call"}, {"api_name": "qgis.utils.iface.building_toolbar", "line_number": 993, "usage_type": "attribute"}, {"api_name": "qgis.utils.iface", "line_number": 993, "usage_type": "name"}, {"api_name": "buildings.gui.bulk_load_frame.BulkLoadFrame", "line_number": 999, "usage_type": "call"}, {"api_name": "qgis.utils.iface.actionPan", "line_number": 1000, "usage_type": "call"}, {"api_name": "qgis.utils.iface", "line_number": 1000, "usage_type": "name"}, {"api_name": "qgis.PyQt.QtGui.QColor", "line_number": 1093, "usage_type": "call"}, {"api_name": 
"qgis.PyQt.QtGui.QColor", "line_number": 1118, "usage_type": "call"}, {"api_name": "qgis.PyQt.QtGui.QColor", "line_number": 1130, "usage_type": "call"}, {"api_name": "qgis.PyQt.QtGui.QColor", "line_number": 1155, "usage_type": "call"}, {"api_name": "qgis.PyQt.QtGui.QColor", "line_number": 1160, "usage_type": "call"}, {"api_name": "qgis.PyQt.QtGui.QColor", "line_number": 1174, "usage_type": "call"}, {"api_name": "qgis.PyQt.QtGui.QColor", "line_number": 1179, "usage_type": "call"}, {"api_name": "qgis.PyQt.QtGui.QColor", "line_number": 1192, "usage_type": "call"}, {"api_name": "qgis.core.QgsProject.instance", "line_number": 1318, "usage_type": "call"}, {"api_name": "qgis.core.QgsProject", "line_number": 1318, "usage_type": "name"}, {"api_name": "qgis.core.QgsProject.instance", "line_number": 1359, "usage_type": "call"}, {"api_name": "qgis.core.QgsProject", "line_number": 1359, "usage_type": "name"}, {"api_name": "qgis.utils.iface.messageBar", "line_number": 1454, "usage_type": "call"}, {"api_name": "qgis.utils.iface", "line_number": 1454, "usage_type": "name"}, {"api_name": "qgis.utils.Qgis.Critical", "line_number": 1457, "usage_type": "attribute"}, {"api_name": "qgis.utils.Qgis", "line_number": 1457, "usage_type": "name"}, {"api_name": "buildings.sql.general_select_statements.convert_geometry", "line_number": 1467, "usage_type": "attribute"}, {"api_name": "buildings.sql.general_select_statements", "line_number": 1467, "usage_type": "name"}, {"api_name": "buildings.sql.buildings_select_statements.building_outlines_capture_method_id_by_building_outline_id", "line_number": 1471, "usage_type": "attribute"}, {"api_name": "buildings.sql.buildings_select_statements", "line_number": 1471, "usage_type": "name"}, {"api_name": "buildings.sql.buildings_bulk_load_select_statements.bulk_load_outlines_capture_source_by_supplied_dataset_id", "line_number": 1478, "usage_type": "attribute"}, {"api_name": "buildings.sql.buildings_bulk_load_select_statements", "line_number": 1478, "usage_type": "name"}, {"api_name": "buildings.sql.buildings_select_statements.building_outlines_suburb_locality_id_by_building_outline_id", "line_number": 1485, "usage_type": "attribute"}, {"api_name": "buildings.sql.buildings_select_statements", "line_number": 1485, "usage_type": "name"}, {"api_name": "buildings.sql.buildings_select_statements.building_outlines_town_city_id_by_building_outline_id", "line_number": 1489, "usage_type": "attribute"}, {"api_name": "buildings.sql.buildings_select_statements", "line_number": 1489, "usage_type": "name"}, {"api_name": "buildings.sql.buildings_select_statements.building_outlines_territorial_authority_id_by_building_outline", "line_number": 1493, "usage_type": "attribute"}, {"api_name": "buildings.sql.buildings_select_statements", "line_number": 1493, "usage_type": "name"}, {"api_name": "qgis.utils.iface.mapCanvas", "line_number": 1524, "usage_type": "call"}, {"api_name": "qgis.utils.iface", "line_number": 1524, "usage_type": "name"}, {"api_name": "qgis.PyQt.QtWidgets.QMessageBox.Yes", "line_number": 1528, "usage_type": "attribute"}, {"api_name": "qgis.PyQt.QtWidgets.QMessageBox", "line_number": 1528, "usage_type": "name"}, {"api_name": "buildings.gui.error_dialog.ErrorDialog", "line_number": 1545, "usage_type": "call"}, {"api_name": "buildings.sql.buildings_bulk_load_select_statements.added_by_bulk_load_outline_id_dataset_id", "line_number": 1555, "usage_type": "attribute"}, {"api_name": "buildings.sql.buildings_bulk_load_select_statements", "line_number": 1555, "usage_type": "name"}, 
{"api_name": "buildings.sql.buildings_bulk_load_select_statements.removed_by_existing_outline_id_dataset_id", "line_number": 1562, "usage_type": "attribute"}, {"api_name": "buildings.sql.buildings_bulk_load_select_statements", "line_number": 1562, "usage_type": "name"}, {"api_name": "buildings.sql.buildings_bulk_load_select_statements.matched_by_bulk_load_outline_id_dataset_id", "line_number": 1569, "usage_type": "attribute"}, {"api_name": "buildings.sql.buildings_bulk_load_select_statements", "line_number": 1569, "usage_type": "name"}, {"api_name": "buildings.sql.buildings_bulk_load_select_statements.matched_by_existing_outline_id_dataset_id", "line_number": 1579, "usage_type": "attribute"}, {"api_name": "buildings.sql.buildings_bulk_load_select_statements", "line_number": 1579, "usage_type": "name"}, {"api_name": "buildings.sql.buildings_bulk_load_select_statements.related_by_bulk_load_outline_id_dataset_id", "line_number": 1590, "usage_type": "attribute"}, {"api_name": "buildings.sql.buildings_bulk_load_select_statements", "line_number": 1590, "usage_type": "name"}, {"api_name": "buildings.sql.buildings_bulk_load_select_statements.related_by_existing_outline_id_dataset_id", "line_number": 1610, "usage_type": "attribute"}, {"api_name": "buildings.sql.buildings_bulk_load_select_statements", "line_number": 1610, "usage_type": "name"}, {"api_name": "qgis.PyQt.QtWidgets.QTableWidgetItem", "line_number": 1624, "usage_type": "call"}, {"api_name": "qgis.PyQt.QtWidgets.QTableWidgetItem", "line_number": 1626, "usage_type": "call"}, {"api_name": "qgis.PyQt.QtWidgets.QListWidgetItem", "line_number": 1758, "usage_type": "call"}, {"api_name": "ast.literal_eval", "line_number": 1771, "usage_type": "call"}, {"api_name": "qgis.PyQt.QtCore.Qt.ItemIsEnabled", "line_number": 1776, "usage_type": "attribute"}, {"api_name": "qgis.PyQt.QtCore.Qt", "line_number": 1776, "usage_type": "name"}, {"api_name": "qgis.PyQt.QtGui.QColor", "line_number": 1780, "usage_type": "call"}, {"api_name": "qgis.PyQt.QtGui.QColor", "line_number": 1782, "usage_type": "call"}, {"api_name": "qgis.PyQt.QtGui.QColor", "line_number": 1786, "usage_type": "call"}, {"api_name": "qgis.PyQt.QtGui.QColor", "line_number": 1788, "usage_type": "call"}, {"api_name": "qgis.PyQt.QtCore.Qt.ItemIsSelectable", "line_number": 1920, "usage_type": "attribute"}, {"api_name": "qgis.PyQt.QtCore.Qt", "line_number": 1920, "usage_type": "name"}, {"api_name": "qgis.PyQt.QtCore.Qt.ItemIsEnabled", "line_number": 1920, "usage_type": "attribute"}, {"api_name": "qgis.PyQt.QtWidgets.QTableWidgetItem", "line_number": 1946, "usage_type": "call"}, {"api_name": "qgis.PyQt.QtWidgets.QHeaderView.Stretch", "line_number": 1948, "usage_type": "attribute"}, {"api_name": "qgis.PyQt.QtWidgets.QHeaderView", "line_number": 1948, "usage_type": "name"}, {"api_name": "qgis.PyQt.QtWidgets.QAbstractItemView.SelectRows", "line_number": 1951, "usage_type": "attribute"}, {"api_name": "qgis.PyQt.QtWidgets.QAbstractItemView", "line_number": 1951, "usage_type": "name"}, {"api_name": "qgis.PyQt.QtWidgets.QAbstractItemView.SingleSelection", "line_number": 1952, "usage_type": "attribute"}, {"api_name": "qgis.PyQt.QtWidgets.QAbstractItemView", "line_number": 1952, "usage_type": "name"}, {"api_name": "buildings.sql.buildings_bulk_load_select_statements.related_by_dataset_id", "line_number": 1960, "usage_type": "attribute"}, {"api_name": "buildings.sql.buildings_bulk_load_select_statements", "line_number": 1960, "usage_type": "name"}, {"api_name": "qgis.PyQt.QtWidgets.QTableWidgetItem", 
"line_number": 1974, "usage_type": "call"}, {"api_name": "qgis.PyQt.QtWidgets.QTableWidgetItem", "line_number": 1975, "usage_type": "call"}, {"api_name": "qgis.PyQt.QtWidgets.QTableWidgetItem", "line_number": 1976, "usage_type": "call"}, {"api_name": "qgis.PyQt.QtWidgets.QTableWidgetItem", "line_number": 1977, "usage_type": "call"}, {"api_name": "qgis.PyQt.QtWidgets.QTableWidgetItem", "line_number": 1978, "usage_type": "call"}, {"api_name": "qgis.PyQt.QtWidgets.QTableWidgetItem", "line_number": 1979, "usage_type": "call"}, {"api_name": "qgis.PyQt.QtWidgets.QTableWidgetItem", "line_number": 1980, "usage_type": "call"}, {"api_name": "qgis.PyQt.QtWidgets.QTableWidgetItem", "line_number": 1981, "usage_type": "call"}, {"api_name": "buildings.sql.buildings_bulk_load_select_statements.matched_by_dataset_id", "line_number": 1987, "usage_type": "attribute"}, {"api_name": "buildings.sql.buildings_bulk_load_select_statements", "line_number": 1987, "usage_type": "name"}, {"api_name": "qgis.PyQt.QtWidgets.QTableWidgetItem", "line_number": 2000, "usage_type": "call"}, {"api_name": "qgis.PyQt.QtWidgets.QTableWidgetItem", "line_number": 2001, "usage_type": "call"}, {"api_name": "qgis.PyQt.QtWidgets.QTableWidgetItem", "line_number": 2002, "usage_type": "call"}, {"api_name": "qgis.PyQt.QtWidgets.QTableWidgetItem", "line_number": 2003, "usage_type": "call"}, {"api_name": "qgis.PyQt.QtWidgets.QTableWidgetItem", "line_number": 2004, "usage_type": "call"}, {"api_name": "qgis.PyQt.QtWidgets.QTableWidgetItem", "line_number": 2005, "usage_type": "call"}, {"api_name": "qgis.PyQt.QtWidgets.QTableWidgetItem", "line_number": 2006, "usage_type": "call"}, {"api_name": "buildings.sql.buildings_bulk_load_select_statements.removed_by_dataset_id", "line_number": 2012, "usage_type": "attribute"}, {"api_name": "buildings.sql.buildings_bulk_load_select_statements", "line_number": 2012, "usage_type": "name"}, {"api_name": "qgis.PyQt.QtWidgets.QTableWidgetItem", "line_number": 2017, "usage_type": "call"}, {"api_name": "qgis.PyQt.QtWidgets.QTableWidgetItem", "line_number": 2018, "usage_type": "call"}, {"api_name": "qgis.PyQt.QtWidgets.QTableWidgetItem", "line_number": 2019, "usage_type": "call"}, {"api_name": "qgis.PyQt.QtWidgets.QTableWidgetItem", "line_number": 2020, "usage_type": "call"}, {"api_name": "buildings.sql.buildings_bulk_load_select_statements.added_by_dataset_id", "line_number": 2026, "usage_type": "attribute"}, {"api_name": "buildings.sql.buildings_bulk_load_select_statements", "line_number": 2026, "usage_type": "name"}, {"api_name": "qgis.PyQt.QtWidgets.QTableWidgetItem", "line_number": 2031, "usage_type": "call"}, {"api_name": "qgis.PyQt.QtWidgets.QTableWidgetItem", "line_number": 2032, "usage_type": "call"}, {"api_name": "qgis.PyQt.QtWidgets.QTableWidgetItem", "line_number": 2033, "usage_type": "call"}, {"api_name": "qgis.utils.iface.mapCanvas", "line_number": 2070, "usage_type": "call"}, {"api_name": "qgis.utils.iface", "line_number": 2070, "usage_type": "name"}, {"api_name": "qgis.utils.iface.mapCanvas", "line_number": 2071, "usage_type": "call"}, {"api_name": "qgis.utils.iface", "line_number": 2071, "usage_type": "name"}, {"api_name": "qgis.PyQt.QtWidgets.QAbstractItemView.MultiSelection", "line_number": 2124, "usage_type": "attribute"}, {"api_name": "qgis.PyQt.QtWidgets.QAbstractItemView", "line_number": 2124, "usage_type": "name"}, {"api_name": "qgis.PyQt.QtWidgets.QAbstractItemView.MultiSelection", "line_number": 2138, "usage_type": "attribute"}, {"api_name": "qgis.PyQt.QtWidgets.QAbstractItemView", 
"line_number": 2138, "usage_type": "name"}, {"api_name": "qgis.PyQt.QtWidgets.QAbstractItemView.MultiSelection", "line_number": 2149, "usage_type": "attribute"}, {"api_name": "qgis.PyQt.QtWidgets.QAbstractItemView", "line_number": 2149, "usage_type": "name"}, {"api_name": "os.path.path.join", "line_number": 2175, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 2175, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 2175, "usage_type": "name"}, {"api_name": "os.path.path.join", "line_number": 2176, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 2176, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 2176, "usage_type": "name"}, {"api_name": "qgis.PyQt.QtGui.QIcon", "line_number": 2177, "usage_type": "call"}, {"api_name": "qgis.PyQt.QtCore.QSize", "line_number": 2178, "usage_type": "call"}, {"api_name": "qgis.PyQt.QtWidgets.QAction", "line_number": 2179, "usage_type": "call"}, {"api_name": "qgis.utils.iface.building_toolbar", "line_number": 2179, "usage_type": "attribute"}, {"api_name": "qgis.utils.iface", "line_number": 2179, "usage_type": "name"}, {"api_name": "qgis.utils.iface.registerMainWindowAction", "line_number": 2180, "usage_type": "call"}, {"api_name": "qgis.utils.iface", "line_number": 2180, "usage_type": "name"}, {"api_name": "qgis.utils.iface.building_toolbar.addAction", "line_number": 2183, "usage_type": "call"}, {"api_name": "qgis.utils.iface.building_toolbar", "line_number": 2183, "usage_type": "attribute"}, {"api_name": "qgis.utils.iface", "line_number": 2183, "usage_type": "name"}, {"api_name": "buildings.utilities.circle_tool.setup_circle", "line_number": 2212, "usage_type": "call"}, {"api_name": "buildings.utilities.circle_tool", "line_number": 2212, "usage_type": "name"}, {"api_name": "qgis.core.QgsProject.instance", "line_number": 2215, "usage_type": "call"}, {"api_name": "qgis.core.QgsProject", "line_number": 2215, "usage_type": "name"}, {"api_name": "buildings.gui.bulk_load_changes.EditAttribute", "line_number": 2216, "usage_type": "attribute"}, {"api_name": "buildings.gui.bulk_load_changes", "line_number": 2216, "usage_type": "name"}, {"api_name": "buildings.gui.bulk_load_changes.EditGeometry", "line_number": 2223, "usage_type": "attribute"}, {"api_name": "buildings.gui.bulk_load_changes", "line_number": 2223, "usage_type": "name"}, {"api_name": "buildings.gui.bulk_load_changes.AddBulkLoad", "line_number": 2230, "usage_type": "attribute"}, {"api_name": "buildings.gui.bulk_load_changes", "line_number": 2230, "usage_type": "name"}, {"api_name": "buildings.utilities.point_tool.PointTool", "line_number": 2245, "usage_type": "argument"}, {"api_name": "qgis.utils.iface.actionPan", "line_number": 2249, "usage_type": "call"}, {"api_name": "qgis.utils.iface", "line_number": 2249, "usage_type": "name"}, {"api_name": "qgis.utils.iface.actionCancelEdits", "line_number": 2251, "usage_type": "call"}, {"api_name": "qgis.utils.iface", "line_number": 2251, "usage_type": "name"}, {"api_name": "qgis.core.QgsProject.instance", "line_number": 2253, "usage_type": "call"}, {"api_name": "qgis.core.QgsProject", "line_number": 2253, "usage_type": "name"}, {"api_name": "qgis.core.QgsProject.instance", "line_number": 2255, "usage_type": "call"}, {"api_name": "qgis.core.QgsProject", "line_number": 2255, "usage_type": "name"}, {"api_name": "qgis.core.QgsProject.instance", "line_number": 2261, "usage_type": "call"}, {"api_name": "qgis.core.QgsProject", "line_number": 2261, "usage_type": "name"}, {"api_name": 
"qgis.core.QgsProject.instance", "line_number": 2276, "usage_type": "call"}, {"api_name": "qgis.core.QgsProject", "line_number": 2276, "usage_type": "name"}, {"api_name": "qgis.utils.iface.layerTreeView", "line_number": 2277, "usage_type": "call"}, {"api_name": "qgis.utils.iface", "line_number": 2277, "usage_type": "name"}, {"api_name": "qgis.PyQt.QtCore.Qt.DisplayRole", "line_number": 2279, "usage_type": "attribute"}, {"api_name": "qgis.PyQt.QtCore.Qt", "line_number": 2279, "usage_type": "name"}, {"api_name": "qgis.PyQt.QtCore.Qt.Unchecked", "line_number": 2280, "usage_type": "attribute"}, {"api_name": "qgis.PyQt.QtCore.Qt", "line_number": 2280, "usage_type": "name"}, {"api_name": "qgis.PyQt.QtCore.Qt.CheckStateRole", "line_number": 2280, "usage_type": "attribute"}, {"api_name": "qgis.PyQt.QtCore.Qt.Checked", "line_number": 2281, "usage_type": "attribute"}, {"api_name": "qgis.PyQt.QtCore.Qt", "line_number": 2281, "usage_type": "name"}, {"api_name": "qgis.PyQt.QtCore.Qt.CheckStateRole", "line_number": 2281, "usage_type": "attribute"}, {"api_name": "qgis.PyQt.QtCore.Qt.DisplayRole", "line_number": 2283, "usage_type": "attribute"}, {"api_name": "qgis.PyQt.QtCore.Qt", "line_number": 2283, "usage_type": "name"}, {"api_name": "qgis.PyQt.QtCore.Qt.Unchecked", "line_number": 2285, "usage_type": "attribute"}, {"api_name": "qgis.PyQt.QtCore.Qt", "line_number": 2285, "usage_type": "name"}, {"api_name": "qgis.PyQt.QtCore.Qt.CheckStateRole", "line_number": 2285, "usage_type": "attribute"}, {"api_name": "qgis.PyQt.QtCore.Qt.Checked", "line_number": 2286, "usage_type": "attribute"}, {"api_name": "qgis.PyQt.QtCore.Qt", "line_number": 2286, "usage_type": "name"}, {"api_name": "qgis.PyQt.QtCore.Qt.CheckStateRole", "line_number": 2286, "usage_type": "attribute"}]}