The regular expressions pointed out by Ned and cheeseinvert don't take into account the case where the match is inside a string.
See the following example (using cheeseinvert's solution):
>>> fixLazyJsonWithRegex ('{ key : "a { a : b }", }')
'{ "key" : "a { "a": b }" }'
The problem is that the expected output is:
'{ "key" : "a { a : b }" }'
Since JSON tokens are a subset of Python tokens, we can use Python's tokenize module.
Please correct me if I'm wrong, but the following code will fix a lazy JSON string in all of these cases:
import tokenize
import token
from StringIO import StringIO

def fixLazyJson (in_text):
    tokengen = tokenize.generate_tokens(StringIO(in_text).readline)

    result = []
    for tokid, tokval, _, _, _ in tokengen:
        # fix unquoted strings
        if (tokid == token.NAME):
            if tokval not in ['true', 'false', 'null', '-Infinity', 'Infinity', 'NaN']:
                tokid = token.STRING
                tokval = u'"%s"' % tokval

        # fix single-quoted strings
        elif (tokid == token.STRING):
            if tokval.startswith ("'"):
                tokval = u'"%s"' % tokval[1:-1].replace ('"', '\\"')

        # remove invalid commas
        elif (tokid == token.OP) and ((tokval == '}') or (tokval == ']')):
            if (len(result) > 0) and (result[-1][1] == ','):
                result.pop()

        result.append((tokid, tokval))

    return tokenize.untokenize(result)
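As a quick sanity check (on Python 2, since the code uses the old StringIO module), feeding the fixed string back to json.loads gives the expected result for the example from the top. Note that untokenize does not preserve the original spacing, so it is the parsed value that matters, not the raw string:

>>> import json
>>> json.loads (fixLazyJson ('{ key : "a { a : b }", }'))
{u'key': u'a { a : b }'}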
So, in order to parse a JSON string, you might want to fall back to fixLazyJson only when json.loads fails (to avoid a performance penalty for well-formed JSON):
import json

def json_decode (json_string, *args, **kwargs):
    try:
        return json.loads (json_string, *args, **kwargs)
    except ValueError:
        # first attempt failed: fix the lazy JSON and try again
        json_string = fixLazyJson (json_string)
        return json.loads (json_string, *args, **kwargs)
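For example, both well-formed and lazy input then go through the same call:

>>> json_decode ('{ key : "a { a : b }", }')
{u'key': u'a { a : b }'}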
The only problem I see when fixing lazy JSON is that, if the JSON is still malformed, the error raised by the second json.loads will reference the line and column of the modified string, not the original one.
As a final note, I just want to point out that it would be straightforward to update any of these methods to accept a file object instead of a string.
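For instance, in fixLazyJson only the tokenizer setup would need to change, because generate_tokens just wants a readline callable (a sketch, with fileobj standing in for a hypothetical open file argument):

# instead of: tokengen = tokenize.generate_tokens(StringIO(in_text).readline)
tokengen = tokenize.generate_tokens (fileobj.readline)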
BONUS: Apart from this, people usually like to include C/C++ comments when JSON is used for configuration files. In that case, you can either remove the comments with a regular expression, or use the following extended version and fix the JSON string in one pass:
import tokenize
import token
from StringIO import StringIO

def fixLazyJsonWithComments (in_text):
    """ Same as fixLazyJson but removing comments as well
    """
    result = []
    tokengen = tokenize.generate_tokens(StringIO(in_text).readline)

    sline_comment = False
    mline_comment = False
    last_token = ''

    for tokid, tokval, _, _, _ in tokengen:

        # ignore single line comments until the end of the line
        if sline_comment:
            if (tokid == token.NEWLINE) or (tokid == tokenize.NL):
                sline_comment = False
            continue

        # ignore multi line comments until the closing */
        if mline_comment:
            if (last_token == '*') and (tokval == '/'):
                mline_comment = False
            last_token = tokval
            continue

        # fix unquoted strings
        if (tokid == token.NAME):
            if tokval not in ['true', 'false', 'null', '-Infinity', 'Infinity', 'NaN']:
                tokid = token.STRING
                tokval = u'"%s"' % tokval

        # fix single-quoted strings
        elif (tokid == token.STRING):
            if tokval.startswith ("'"):
                tokval = u'"%s"' % tokval[1:-1].replace ('"', '\\"')

        # remove invalid commas
        elif (tokid == token.OP) and ((tokval == '}') or (tokval == ']')):
            if (len(result) > 0) and (result[-1][1] == ','):
                result.pop()

        # detect single-line comments
        elif tokval == "//":
            sline_comment = True
            continue

        # detect multiline comments
        elif (last_token == '/') and (tokval == '*'):
            result.pop() # remove previous token
            mline_comment = True
            continue

        result.append((tokid, tokval))
        last_token = tokval

    return tokenize.untokenize(result)
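For instance, running it over a small commented config (a made-up example, again on Python 2):

lazy_config = '''{
  // server settings
  host: 'localhost', /* no scheme */
  port: 8080 }'''

conf = json.loads (fixLazyJsonWithComments (lazy_config))
print conf['host'], conf['port']   # prints: localhost 8080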