윤혜원

BS selection algorithm and save internet activity predictions

{
"cells": [
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
" Unnamed: 0 gridID Internet Activity Prediction\n",
"0 0 1 9952.919155\n",
"1 0 2 10007.809479\n",
"2 0 3 12864.899626\n"
]
}
],
"source": [
"import pandas as pd\n",
"df = pd.read_csv('predictions.csv')\n",
"#sort internet activity prediction\n",
"df.sort_values(by=['Internet Activity Prediction'],inplace=True)\n",
"print(df)\n"
]
},
{
"cell_type": "code",
"execution_count": 13,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"g1 cache remainder size 137\n",
"g2 cache remainder size 582\n",
"g3 cache remainder size 867\n",
"contents size 821\n",
"[[3, 867]]\n"
]
}
],
"source": [
"from random import seed\n",
"from random import randint\n",
"seed(1)\n",
"g1_cache=randint(0,1000)\n",
"g2_cache=randint(0,1000)\n",
"g3_cache=randint(0,1000)\n",
"\n",
"contents_cache=randint(0,1000)\n",
"\n",
"print(\"g1 cache remainder size\" , g1_cache)\n",
"print(\"g2 cache remainder size\" ,g2_cache)\n",
"print(\"g3 cache remainder size\" ,g3_cache)\n",
"print(\"contents size\" ,contents_cache)\n",
"\n",
"l_bs_cache=[]\n",
"if(contents_cache<g1_cache):\n",
" arr=[1,g1_cache]\n",
" l_bs_cache.append(arr)\n",
"if(contents_cache<g2_cache):\n",
" arr=[2,g2_cache]\n",
" l_bs_cache.append(arr)\n",
"if(contents_cache<g3_cache):\n",
" arr=[3,g3_cache]\n",
" l_bs_cache.append(arr)\n",
"\n",
"print(l_bs_cache)"
]
},
{
"cell_type": "code",
"execution_count": 22,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"[1, 9.18594536053351, 45.47304470082458]\n",
"[2, 9.193139700323409, 45.46987577194055]\n",
"[3, 9.195377006955539, 45.46770952221271]\n"
]
}
],
"source": [
"import random\n",
"import sys\n",
"import math\n",
"#milan lat,lon\n",
"latitude = 45.4654219\n",
"longitude = 9.1859243\n",
"bs_locations=[]\n",
"def generate_random_data(lat, lon):\n",
" for i in range(1,4):\n",
" dec_lat = random.random()/100\n",
" dec_lon = random.random()/100\n",
" arr=[i, lon+dec_lon, lat+dec_lat]\n",
" print(arr)\n",
" bs_locations.append(arr)\n",
"\n",
"generate_random_data(latitude, longitude)\n"
]
},
{
"cell_type": "code",
"execution_count": 34,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"(9.194398637369373, 45.466765542441124)\n",
"[[3, 0.1499061393976395, 867]]\n",
" gridID distance internet traffic prediction\n",
"0 3 0.149906 867\n",
"0.1499061393976395\n",
"bs selection result: 3\n"
]
}
],
"source": [
"import geopy.distance\n",
"seed(1)\n",
"dec_lat = random.random()/100\n",
"dec_lon = random.random()/100\n",
"request_location=(longitude+dec_lon, latitude+dec_lat)\n",
"print(request_location)\n",
"\n",
"bs_distances=[]\n",
"for bs in l_bs_cache:\n",
" grid_num=bs[0]\n",
" for l in bs_locations:\n",
" if(l[0]==grid_num):\n",
" coords_1=(l[1],l[2])\n",
" dist=geopy.distance.geodesic(coords_1, request_location).km\n",
" arr=[grid_num,dist,bs[1]]\n",
" bs_distances.append(arr)\n",
"#[gridID, distance, traffic]\n",
"print(bs_distances)\n",
"\n",
"#sort bs_distances\n",
"col=['gridID','distance','internet traffic prediction']\n",
"df=pd.DataFrame(bs_distances, columns=col)\n",
"df.sort_values(by=['distance'], inplace=True)\n",
"print(df)\n",
"\n",
"min_value=df['distance'][0]\n",
"grid_num=df['gridID'][0]\n",
"print(min_value)\n",
"count_row=df.shape[0]\n",
"for x in range(count_row):\n",
" if(df.loc[x]['distance']<min_value):\n",
" min_value=df.loc[x]['distance']\n",
" grid_num=df.loc[x]['gridID']\n",
" \n",
"print(\"bs selection result: \",grid_num)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.7.7"
}
},
"nbformat": 4,
"nbformat_minor": 4
}
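
Taken together, the notebook cells above implement a simple rule: keep only the base stations whose remaining cache can hold the requested content, then hand the request to the nearest of those candidates. The following stand-alone sketch restates that rule in one function; select_bs and its argument structures are illustrative names, not part of the notebook.

# Minimal sketch of the BS selection rule walked through in the notebook (illustrative names).
import geopy.distance

def select_bs(content_size, bs_caches, bs_coords, request_latlon):
    # bs_caches: {gridID: remaining cache size}
    # bs_coords: {gridID: (lat, lon)}
    # request_latlon: (lat, lon) of the user request
    candidates = [g for g, cache in bs_caches.items() if content_size < cache]  # cache-fit filter
    if not candidates:
        return None  # no BS can cache the content
    # among the candidates, pick the BS geodesically closest to the request
    return min(candidates,
               key=lambda g: geopy.distance.geodesic(bs_coords[g], request_latlon).km)

With the values drawn in the notebook (content size 821 and cache remainders 137, 582, 867), only grid 3 passes the cache-fit filter, so select_bs would return 3, matching the notebook's result.
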
import pandas as pd
from tqdm import tqdm

# load one day of the raw Milan telecom dataset and convert it to CSV
data = open("sms-call-internet-mi-2013-11-01.txt").read()
lines_of_data = data.splitlines()
tmp = []
for i in tqdm(range(len(lines_of_data))):
    tmp.append(lines_of_data[i].split())
data_df = pd.DataFrame(tmp)
data_df.to_csv('dataset.csv')
data_df.shape
data_df.ndim
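
The raw file is loaded above without column names. As a hedged sketch, assuming the standard eight-column layout of the Telecom Italia Milan grid dataset (square id, time interval, country code, SMS-in, SMS-out, call-in, call-out, internet traffic), the columns could be named and internet activity aggregated per grid as below; the column list and the per_grid name are assumptions, not part of this change.

# Assumed column layout of the Milan dataset; verify against the dataset documentation.
cols = ['gridID', 'time_interval', 'country_code',
        'sms_in', 'sms_out', 'call_in', 'call_out', 'internet_traffic']
data_df.columns = cols[:data_df.shape[1]]
# total internet activity per grid square (illustrative; rows with missing fields would
# need tab-aware parsing, e.g. pd.read_csv(..., sep='\t'), rather than whitespace split())
data_df['internet_traffic'] = pd.to_numeric(data_df['internet_traffic'], errors='coerce')
per_grid = data_df.groupby('gridID')['internet_traffic'].sum()
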
,gridID,Internet Activity Prediction
0,1,9952.919154515539
0,2,10007.809478855686
0,3,12864.899626276776
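
The predictions.csv previewed above is the file the first notebook cell reads back. A minimal sketch of writing such a file with pandas follows; predictions_df and its values are placeholders taken from the preview, since the prediction model itself is not part of this change.

import pandas as pd

# predictions_df is a placeholder; the real values come from the prediction model.
predictions_df = pd.DataFrame({'gridID': [1, 2, 3],
                               'Internet Activity Prediction': [9952.92, 10007.81, 12864.90]})
predictions_df.to_csv('predictions.csv')  # the default index becomes the leading unnamed column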