So I have a problem. I'm trying to make my imports faster, so I used the multiprocessing module to split a group of imports into two functions and run each one on a separate core, hoping that would speed the imports up. But now the rest of the code doesn't recognize the modules at all: names like request and json are undefined once the main loop runs. What am I doing wrong?
import multiprocessing

def core1():
    import wikipedia
    import subprocess
    import random
    return wikipedia, subprocess, random

def core2():
    from urllib import request
    import json
    import webbrowser
    return request, json, webbrowser

if __name__ == "__main__":
    # start each group of imports on its own core
    start_core_1 = multiprocessing.Process(name='worker 1', target=core1, args=core2())
    start_core_2 = multiprocessing.Process(name='worker 2', target=core2, args=core1())
    start_core_1.start()
    start_core_2.start()
    while True:
        user = input('[!] ')
        with request.urlopen('https://api.wit.ai/message?v=20160511&q=%s&access_token=Z55PIVTSSFOETKSBPWMNPE6YL6HVK4YP' % request.quote(user)) as wit_api:  # call to wit.ai api
            wit_api_html = wit_api.read()
            wit_api_html = wit_api_html.decode()
            wit_api_data = json.loads(wit_api_html)
            intent = wit_api_data['entities']['Intent'][0]['value']
            term = wit_api_data['entities']['search_term'][0]['value']
        if intent == 'info_on':
            with request.urlopen('https://kgsearch.googleapis.com/v1/entities:search?query=%s&key=AIzaSyCvgNV4G7mbnu01xai0f0k9NL2ito8vY6s&limit=1&indent=True' % term.replace(' ', '%20')) as response:
                google_knowledge_base_html = response.read()
                google_knowledge_base_html = google_knowledge_base_html.decode()
                google_knowledge_base_data = json.loads(google_knowledge_base_html)
                print(google_knowledge_base_data['itemListElement'][0]['result']['detailedDescription']['articleBody'])
        else:
            print('Something')
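
To take the rest of the script out of the picture, here is a minimal sketch of the same idea (load_json and the json module are just placeholders for my real import groups), and as far as I can tell it hits the same problem:

import multiprocessing

def load_json():
    # this import only binds the name inside the worker process
    import json
    print('worker imported', json.__name__)

if __name__ == '__main__':
    worker = multiprocessing.Process(target=load_json)
    worker.start()
    worker.join()
    # the parent process never ran the import, so the name 'json'
    # does not exist here and this line raises NameError
    print(json.loads('{}'))

The worker prints its message fine, but the last line in the parent fails with NameError, which looks like exactly what happens with request and json in my main loop.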