I have a directory full of JSON files, like so:
json/
    checkpoint_01.json
    checkpoint_02.json
    ...
    checkpoint_100.json
where each file has thousands of JSON objects, dumped line by line:
{"playlist_id": "37i9dQZF1DZ06evO2dqn7O", "user_id": "spotify", "sentence": ["Lil Wayne", "Wiz Khalifa", "Imagine Dragons", "Logic", "Ty Dolla $ign", "X Ambassadors", "Machine Gun Kelly", "X Ambassadors", "Bebe Rexha", "X Ambassadors", "Jamie N Commons", "X Ambassadors", "Eminem", "X Ambassadors", "Jamie N Commons", "Skylar Grey", "X Ambassadors", "Zedd", "Logic", "X Ambassadors", "Imagine Dragons", "X Ambassadors", "Jamie N Commons", "A$AP Ferg", "X Ambassadors", "Tom Morello", "X Ambassadors", "The Knocks", "X Ambassadors"]}
{"playlist_id": "37i9dQZF1DZ06evO1A0kr6", "user_id": "spotify", "sentence": ["RY X", "ODESZA", "RY X", "Thomas Jack", "RY X", "Rhye", "RY X"]}
(...)
I know I can combine all files into one, like so:
import glob

def combine():
    read_files = glob.glob("*.json")
    # note: this reads every file into memory and joins everything
    # into one giant JSON array inside a single string
    with open("merged_playlists.json", "w") as outfile:
        outfile.write('[{}]'.format(
            ','.join(open(f).read() for f in read_files)))
but at the end I need to parse one big JSON file, using the following script:
parser.py

"""
Passes extraction output into `word2vec`
and prints results as JSON.
"""
from __future__ import absolute_import, unicode_literals

import json

import click
from numpy import array as np_array
import gensim


class LineGenerator(object):
    """Reads a sentence file, yields numpy array-wrapped sentences"""

    def __init__(self, fh):
        self.fh = fh

    def __iter__(self):
        # iterate the file handle lazily rather than calling readlines(),
        # so we never hold the whole file in memory
        for line in self.fh:
            yield np_array(json.loads(line)['sentence'])


def serialize_rankings(rankings):
    """Returns a JSON-encoded object representing word2vec's
    similarity output.
    """
    return json.dumps([
        {'artist': artist, 'rel': rel}
        for (artist, rel) in rankings
    ])


@click.command()
@click.option('-i', 'input_file', type=click.File('r', encoding='utf-8'),
              required=True)
@click.option('-t', 'term', required=True)
@click.option('--min-count', type=click.INT, default=5)
@click.option('-w', 'workers', type=click.INT, default=4)
def cli(input_file, term, min_count, workers):
    # create word2vec
    model = gensim.models.Word2Vec(min_count=min_count, workers=workers)
    model.build_vocab(LineGenerator(input_file))

    try:
        similar = model.most_similar(term)
        click.echo(serialize_rankings(similar))
    except KeyError as exc:  # bind the exception so it can be reported
        # really wish this was a more descriptive error
        exit('Could not parse input: {}'.format(exc))


if __name__ == '__main__':
    cli()
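For reference, once I have a merged file I invoke the script like this (the flags are the click options defined above; the term is just an artist name that appears in the data):

python parser.py -i merged_playlists.json -t "X Ambassadors" --min-count 5 -w 4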
QUESTION:
So, how do I combine ALL the JSON objects from the json/ folder into one single file, ending up with one JSON object per line?
Note: memory is an issue here, because all files amount to 4 gigabytes.
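For what it's worth, this is the direction I'm considering (untested): stream each file into the output one line at a time, so only a single line is ever in memory. I'm assuming every line in the checkpoint files is already a complete JSON object; the combine_streaming name and the json/checkpoint_*.json pattern are just placeholders:

import glob

def combine_streaming(pattern='json/checkpoint_*.json',
                      out_path='merged_playlists.json'):
    # stream every input file into the output line by line,
    # so memory use stays constant regardless of total size
    with open(out_path, 'w') as outfile:
        for path in sorted(glob.glob(pattern)):
            with open(path) as infile:
                for line in infile:
                    line = line.strip()
                    if line:
                        # each non-empty line is assumed to already be
                        # one complete JSON object
                        outfile.write(line + '\n')

Is this the right approach, or is there something more robust?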