import os
import glob
import psycopg2
import pandas as pd
import numpy as np
from sql_queries import *
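
# ETL pipeline: read the JSON song and log datasets and load them into
# Postgres. Song files populate the songs and artists tables; log files
# populate the time, users, and songplays tables. The INSERT and SELECT
# statements come from sql_queries.py.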

# cur: an open cursor on the target database
# filepath: path to a single JSON song file
# Receives a cursor and a JSON file path, reads the file with pandas,
# and inserts one song record and one artist record into the database.
def process_song_file(cur, filepath):
    # open song file
    df = pd.read_json(filepath, lines=True)

    # insert song record
    song_data = df[['song_id', 'title', 'artist_id', 'year',
                    'duration']].values[0].tolist()
    cur.execute(song_table_insert, song_data)

    # insert artist record
    artist_data = df[['artist_id', 'artist_name', 'artist_location',
                      'artist_latitude', 'artist_longitude']].values[0].tolist()
    cur.execute(artist_table_insert, artist_data)
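
# A minimal usage sketch (the file path is hypothetical; assumes an open
# cursor on the target database):
#     process_song_file(cur, 'data/song_data/A/A/A/TRAAAAW128F429D538.json')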

# cur: an open cursor on the target database
# filepath: path to a single JSON log file
# Receives a cursor and a JSON file path, filters the log to NextSong
# events, and inserts time, user, and songplay records into the database.
def process_log_file(cur, filepath):
    # open log file
    df = pd.read_json(filepath, lines=True)

    # filter by NextSong action
    df = df[df['page'] == 'NextSong']

    # convert timestamp column to datetime
    t = pd.to_datetime(df['ts'], unit='ms')

    # insert time data records; isocalendar().week replaces the
    # Series.dt.week accessor, which was removed in pandas 2.0
    time_data = np.transpose(np.array([df['ts'].values, t.dt.hour.values,
                                       t.dt.day.values,
                                       t.dt.isocalendar().week.astype(int).values,
                                       t.dt.month.values, t.dt.year.values,
                                       t.dt.weekday.values]))
    column_labels = ('ts', 'hour', 'day', 'week', 'month', 'year', 'weekday')
    time_df = pd.DataFrame(data=time_data, columns=column_labels)

    # iterate over the rows and run an insert statement for each one
    for i, row in time_df.iterrows():
        cur.execute(time_table_insert, list(row))

    # load user table
    user_df = df[['userId', 'firstName', 'lastName', 'gender', 'level']]

    # insert user records
    for i, row in user_df.drop_duplicates().iterrows():
        cur.execute(user_table_insert, list(row))

    # insert songplay records
    for index, row in df.iterrows():
        # get songid and artistid from the song and artist tables;
        # psycopg2 adapts Python str values directly, so no manual
        # utf-8 encoding is needed (bytes would be sent as bytea and
        # never match the text columns)
        cur.execute(song_select, (row.song, row.artist, row.length))
        results = cur.fetchone()

        # assign song_id and artist_id, or NULLs when no match is found
        songid, artistid = results if results else (None, None)

        # insert songplay record
        songplay_data = (row.ts, row.userId, row.level, songid, artistid,
                         row.sessionId, row.location, row.userAgent)
        cur.execute(songplay_table_insert, songplay_data)
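
# A minimal usage sketch (the file name is hypothetical; assumes an open
# cursor on the target database):
#     process_log_file(cur, 'data/log_data/2018/11/2018-11-01-events.json')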

# walk every directory under filepath, collect the JSON files, and apply
# func to each one
def process_data(cur, conn, filepath, func):
    # get all files matching extension from directory
    all_files = []
    for root, dirs, files in os.walk(filepath):
        files = glob.glob(os.path.join(root, '*.json'))
        for f in files:
            all_files.append(os.path.abspath(f))

    # get total number of files found
    num_files = len(all_files)
    print('{} files found in {}'.format(num_files, filepath))

    # iterate over files and process
    for i, datafile in enumerate(all_files, 1):
        func(cur, datafile)
        conn.commit()
        print('{}/{} files processed.'.format(i, num_files))
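
# Note: process_data commits after each file, so records loaded from earlier
# files persist even if a later file fails mid-run.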

def main():
    conn = psycopg2.connect("host=127.0.0.1 dbname=sparkifydb user=student password=student")
    cur = conn.cursor()

    process_data(cur, conn, filepath='data/song_data', func=process_song_file)
    process_data(cur, conn, filepath='data/log_data', func=process_log_file)

    conn.close()

if __name__ == "__main__":
    main()