@article{scholars20093,
  author   = {Maray, M. and Mustafa, E. and Shuja, J.},
  title    = {Wireless Power Assisted Computation Offloading in Mobile Edge Computing: A Deep Reinforcement Learning Approach},
  journal  = {Human-centric Computing and Information Sciences},
  year     = {2024},
  volume   = {14},
  doi      = {10.22967/HCIS.2024.14.022},
  url      = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85189106157&doi=10.22967\%2fHCIS.2024.14.022&partnerID=40&md5=23b5f02b32d37655e784aed071ddf163},
  note     = {cited By 1},
  abstract = {One of the missions of mobile edge computing (MEC) is to support innovative applications in closer proximity to end devices while preserving latency constraints. Integrating MEC with the emerging wireless power transfer (WPT) approach extends the battery lifetime of end devices, but fading channels with time-varying constraints limit this procedure. We propose a joint deep reinforcement learning based framework that determines an effective offloading decision together with the optimal WPT duration. To solve the non-convex joint optimization problem, we decompose the original problem into two sub-problems: the selection of an efficient offloading decision and the allocation of the optimal WPT duration. We consider orthogonal frequency division multiple access (OFDMA) as the channel access method. As the hyperparameters of a neural network largely influence its performance, we also simulate and analyze the effect of different variations of these parameters on system performance. Furthermore, we reduce the overhead of task partitioning that is usually involved in DNN-based partial offloading schemes. Simulation results show the effectiveness and applicability of the proposed framework in dynamic networks. For example, the proposed framework provides an average execution latency of 372.3 ms per channel in a 30-user network, making the system applicable in large-scale dynamic networks. {\copyright} 2023 Korea Information Processing Society. All Rights Reserved.},
  keywords = {Computation offloading; Deep learning; Fading channels; Frequency division multiple access; Inductive power transmission; Mobile edge computing; Orthogonal frequency division multiplexing; Reinforcement learning; Resource allocation; Dynamic channels; Dynamic networks; Neural networks; Transfer duration; Wireless power; Wireless power transfer; Energy transfer}
}