1

Using the approach from that link, I was able to create a Windows service in Python, and it runs fine. However, when I try to get it to use the GPU (to interact with TensorFlow), the GPU is not visible from inside the Service class.

Here is code that I am using to test the windows service.


import sys
import os
import os.path

import numpy as np
from PIL import Image

import random
#from scipy.misc import imread, imsave
import tensorflow as tf

from tensorflow import keras

from tensorflow.python.keras.layers import concatenate, Lambda, Input, Dense, Dropout, Flatten, Conv2D, MaxPooling2D, \
        BatchNormalization, Activation, GlobalAveragePooling2D, SeparableConv2D, Reshape,  Conv2DTranspose

import queue
from queue import Queue
#import keras

import pika
import json
import base64
import time

from six.moves import configparser
import threading
from threading import Lock, Thread

import win32serviceutil
import win32service
import win32event
import servicemanager
import logging
import logging.handlers

# File locations (Windows paths; the account running the service must be able
# to write the log file).
log_file = "C:\\loglocation\\GPUService.log"
configLocation = "D:\\configlocation\\config.ini"

# Rotating file handler: 10 MiB per file, two backup files kept.
# https://docs.python.org/3.4/library/logging.handlers.html#rotatingfilehandler
handler = logging.handlers.RotatingFileHandler(log_file, maxBytes=10485760, backupCount=2)

# Timestamped record layout with day-first dates.
formatter = logging.Formatter('%(asctime)s - %(module)-10s - %(levelname)-8s %(message)s', '%d-%m-%Y %H:%M:%S')
handler.setFormatter(formatter)

# Named logger shared by both the service class and the command-line path;
# DEBUG and above are recorded.
mylogger = logging.getLogger("MyLogger")
mylogger.setLevel(logging.DEBUG)
mylogger.addHandler(handler)

# Deterministic runs: seed both the stdlib and NumPy PRNGs with one constant.
SEED = 42
random.seed(SEED)
np.random.seed(SEED)

class AAAService(win32serviceutil.ServiceFramework):
    """Windows service that logs TensorFlow GPU visibility and build info.

    NOTE(review): when hosted by the SCM, this code runs under the service
    account (typically LocalSystem) in session 0, whose environment (PATH,
    CUDA DLL locations, driver access) can differ from the interactive
    user's. That is a likely reason ``tf.config.list_physical_devices('GPU')``
    returns ``[]`` here while the same code sees the GPU when run from the
    console -- confirm by logging ``os.environ['PATH']`` in both contexts
    and/or configuring the service to "Log On" as the interactive user.
    """

    _svc_name_ = "AAATestService"
    _svc_display_name_ = "Test Service"

    def __init__(self, args):
        """Register with the SCM and create the stop event."""
        mylogger.info("*** INIT SERVICE ***\n")
        win32serviceutil.ServiceFramework.__init__(self, args)
        # Auto-reset event; SvcStop signals it so SvcDoRun's wait returns.
        self.hWaitStop = win32event.CreateEvent(None, 0, 0, None)
        # Cooperative stop flag for any worker loop that polls it.
        self.stop_requested = False

    def SvcStop(self):
        """SCM stop callback: acknowledge the stop, then signal the event."""
        # Tell the SCM we are starting the stop process before anything else.
        self.ReportServiceStatus(win32service.SERVICE_STOP_PENDING)
        mylogger.info("*** STOP SERVICE ***\n")
        win32event.SetEvent(self.hWaitStop)
        self.stop_requested = True

    def SvcDoRun(self):
        """SCM start callback: run main(), then block until stop is signalled."""
        mylogger.info("*** STARTING SERVICE ***\n")
        try:
            self.main()
        except Exception:
            # Without this, an exception in main() would vanish into the
            # service framework with nothing written to our log file.
            mylogger.exception("Unhandled exception in service main()")
        win32event.WaitForSingleObject(self.hWaitStop, win32event.INFINITE)

    def main(self):
        """Log GPU visibility plus the CUDA/cuDNN build details, then return."""
        mylogger.info("... Can I see a GPU? ...\n")
        # Empty list here means TensorFlow cannot see any GPU in this context.
        mylogger.info(tf.config.list_physical_devices('GPU'))

        # Build-time CUDA/cuDNN info: reflects how the TF wheel was compiled,
        # not whether the runtime can actually reach a GPU.
        sys_details = tf.sysconfig.get_build_info()
        cuda_version = sys_details["cuda_version"]
        mylogger.info("This is my cuda version: " + cuda_version)

        cudnn_version = sys_details["cudnn_version"]
        mylogger.info("CUDNN_VERSION: " + cudnn_version)

        cuda_compute_capabilities = sys_details["cuda_compute_capabilities"]
        mylogger.info(cuda_compute_capabilities)

        mylogger.info("... STARTING SCHEDULE PROCESS ...\n")

        mylogger.info("... Waiting for reponse messages ...\n")

        # Placeholder for the eventual scheduled worker loop:
        # while not self.stop_requested:
        #     mylogger.info("... task ...\n")
        #     time.sleep(10)
        return


if __name__ == '__main__':
    if len(sys.argv) == 1:
        # Launched by the SCM with no arguments: host the service.
        servicemanager.Initialize()
        servicemanager.PrepareToHostSingle(AAAService)
        servicemanager.StartServiceCtrlDispatcher()
    else:
        # Command-line invocation (install / start / stop / remove ...).
        win32serviceutil.HandleCommandLine(AAAService)

        # Same diagnostics as AAAService.main, tagged with a leading "1" so
        # the interactive context can be told apart from the service context
        # in the shared log file.
        mylogger.info("... 1Can I see a GPU? ...\n")
        mylogger.info(tf.config.list_physical_devices('GPU'))

        build_info = tf.sysconfig.get_build_info()
        mylogger.info("1This is my cuda version: " + build_info["cuda_version"])
        mylogger.info("1CUDNN_VERSION: " + build_info["cudnn_version"])
        mylogger.info(build_info["cuda_compute_capabilities"])

        mylogger.info("... 1STARTING SCHEDULE PROCESS ...\n")

        mylogger.info("... 1Waiting for reponse messages ...\n")


And here is my conda list to show my environment

#
# Name                    Version                   Build  Channel
absl-py                   0.15.0                   pypi_0    pypi
altgraph                  0.17               pyhd3eb1b0_0
astunparse                1.6.3                    pypi_0    pypi
ca-certificates           2021.10.8            h5b45459_0    conda-forge
cachetools                5.0.0                    pypi_0    pypi
certifi                   2021.10.8        py39hcbf5309_1    conda-forge
charset-normalizer        2.0.12                   pypi_0    pypi
flatbuffers               1.12                     pypi_0    pypi
freetype                  2.10.4               hd328e21_0
future                    0.18.2           py39haa95532_1
gast                      0.4.0                    pypi_0    pypi
google-auth               2.6.0                    pypi_0    pypi
google-auth-oauthlib      0.4.6                    pypi_0    pypi
google-pasta              0.2.0                    pypi_0    pypi
grpcio                    1.34.1                   pypi_0    pypi
h5py                      3.1.0                    pypi_0    pypi
idna                      3.3                      pypi_0    pypi
importlib-metadata        4.11.1                   pypi_0    pypi
jpeg                      9d                   h2bbff1b_0
keras-nightly             2.5.0.dev2021032900          pypi_0    pypi
keras-preprocessing       1.1.2                    pypi_0    pypi
libpng                    1.6.37               h2a8f88b_0
libtiff                   4.2.0                hd0e1b90_0
libwebp                   1.2.0                h2bbff1b_0
lz4-c                     1.9.3                h2bbff1b_1
macholib                  1.14               pyhd3eb1b0_1
markdown                  3.3.6                    pypi_0    pypi
numpy                     1.19.5                   pypi_0    pypi
oauthlib                  3.2.0                    pypi_0    pypi
olefile                   0.46               pyhd3eb1b0_0
openssl                   1.1.1l               h8ffe710_0    conda-forge
opt-einsum                3.3.0                    pypi_0    pypi
pefile                    2019.4.18                  py_0
pika                      1.2.0              pyh44b312d_0    conda-forge
pillow                    8.4.0            py39hd45dc43_0
pip                       21.2.4           py39haa95532_0
protobuf                  3.19.4                   pypi_0    pypi
pyasn1                    0.4.8                    pypi_0    pypi
pyasn1-modules            0.2.8                    pypi_0    pypi
pycryptodome              3.12.0           py39h2bbff1b_0
pyinstaller               3.6              py39h8cc25b3_6
python                    3.9.7                h6244533_1
python_abi                3.9                      2_cp39    conda-forge
pywin32                   302              py39h827c3e9_1
pywin32-ctypes            0.2.0           py39haa95532_1000
requests                  2.27.1                   pypi_0    pypi
requests-oauthlib         1.3.1                    pypi_0    pypi
rsa                       4.8                      pypi_0    pypi
setuptools                58.0.4           py39haa95532_0
six                       1.15.0                   pypi_0    pypi
sqlite                    3.37.2               h2bbff1b_0
tensorboard               2.8.0                    pypi_0    pypi
tensorboard-data-server   0.6.1                    pypi_0    pypi
tensorboard-plugin-wit    1.8.1                    pypi_0    pypi
tensorflow-estimator      2.5.0                    pypi_0    pypi
tensorflow-gpu            2.5.0                    pypi_0    pypi
termcolor                 1.1.0                    pypi_0    pypi
tk                        8.6.11               h2bbff1b_0
typing-extensions         3.7.4.3                  pypi_0    pypi
tzdata                    2021e                hda174b7_0
urllib3                   1.26.8                   pypi_0    pypi
vc                        14.2                 h21ff451_1
vs2015_runtime            14.27.29016          h5e58377_2
werkzeug                  2.0.3                    pypi_0    pypi
wheel                     0.37.1             pyhd3eb1b0_0
wincertstore              0.2              py39haa95532_2
wrapt                     1.12.1                   pypi_0    pypi
xz                        5.2.5                h62dcd97_0
zipp                      3.7.0                    pypi_0    pypi
zlib                      1.2.11               h8cc25b3_4
zstd                      1.4.9                h19a0ad4_0

When I install or start the service, my log shows that the GPU is correctly recognized in the __main__ block, but once execution reaches the service class, the GPU is no longer seen.

The service still sees the appropriate version of cuda as well, here is a snapshot from the log


02-03-2022 10:36:09 - GPUService - INFO     ... 1Can I see a GPU? ...

02-03-2022 10:36:09 - GPUService - INFO     [PhysicalDevice(name='/physical_device:GPU:0', device_type='GPU')]
02-03-2022 10:36:09 - GPUService - INFO     1This is my cuda version: 64_112
02-03-2022 10:36:09 - GPUService - INFO     1CUDNN_VERSION: 64_8
02-03-2022 10:36:09 - GPUService - INFO     ['sm_35', 'sm_50', 'sm_60', 'sm_70', 'sm_75', 'compute_80']
02-03-2022 10:36:09 - GPUService - INFO     ... 1STARTING SCHEDULE PROCESS ...

02-03-2022 10:36:09 - GPUService - INFO     ... 1Waiting for reponse messages ...

02-03-2022 10:36:11 - GPUService - INFO     *** INIT SERVICE ***

02-03-2022 10:36:11 - GPUService - INFO     *** STARTING SERVICE ***

02-03-2022 10:36:11 - GPUService - INFO     ... Can I see a GPU? ...

02-03-2022 10:36:11 - GPUService - INFO     []
02-03-2022 10:36:11 - GPUService - INFO     This is my cuda version: 64_112
02-03-2022 10:36:11 - GPUService - INFO     CUDNN_VERSION: 64_8
02-03-2022 10:36:11 - GPUService - INFO     ['sm_35', 'sm_50', 'sm_60', 'sm_70', 'sm_75', 'compute_80']
02-03-2022 10:36:11 - GPUService - INFO     ... STARTING SCHEDULE PROCESS ...

02-03-2022 10:36:11 - GPUService - INFO     ... Waiting for reponse messages ...

I have another script that is just testing out the GPU within this conda env and it sees the GPU just fine.

I think it has something to do with the change of execution context once the code enters the service.

  • Hi! Did you check this thread on using tf.device() ? https://www.tensorflow.org/guide/gpu –  Mar 03 '22 at 07:20
  • @TensorflowSupport thanks for the comment! If you look at the code, I am using `tf.config.list_physical_devices('GPU')` as recommended in that article to determine where in my code I am losing GPU support. – William Monroe Mar 03 '22 at 16:01

0 Answers