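"""ETL pipeline for the sparkify database.

Reads the song and log JSON files and loads them into the star schema:
the songplays fact table plus the song, artist, user, and time dimensions.
"""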
import os
import glob
import psycopg2
import pandas as pd
from sql_queries import *
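# sql_queries is expected to provide song_table_insert, artist_table_insert,
# user_table_insert, time_table_insert, songplay_table_insert, and song_select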


def process_song_file(cur, filepath):
"""
- Create a Pandas dataframe from a filepath with song data.
- Select fields according to song dimension table and insert into db.
"""
# open song file
df = pd.read_json(filepath, typ='series')
# insert song record
    song_data = df.loc[['song_id', 'title', 'artist_id', 'year', 'duration']].tolist()
cur.execute(song_table_insert, song_data)
# insert artist record
    artist_data = df.loc[
        ['artist_id', 'artist_name', 'artist_location', 'artist_latitude', 'artist_longitude']].tolist()
cur.execute(artist_table_insert, artist_data)


def process_log_file(cur, filepath):
"""
- Create a Pandas dataframe from a filepath with log (user interaction) data filtering by NextSong event.
- Create a sub dataframe with timestamp data to extract granularity to populate time dimension table.
- Select fields related to user from log dataframe to populate user dimension table.
- The song_play data is populated in fact table from log data and the ids of song and
artist tables retrieved previously.
"""
# open log file
df = pd.read_json(filepath, lines=True)
# filter by NextSong action
df = df[df['page'] == "NextSong"]
    # convert timestamp column to datetime, keeping the original row order so
    # each converted value stays aligned with its source ts when zipped below
    t = pd.to_datetime(df['ts'], unit='ms')
    # insert time data records (isocalendar()[1] replaces the deprecated weekofyear)
    time_data = [[ts, dt.hour, dt.day, dt.isocalendar()[1], dt.month, dt.year, dt.dayofweek]
                 for ts, dt in zip(df['ts'], t)]
    column_labels = ('timestamp', 'hour', 'day', 'weekofyear', 'month', 'year', 'weekday')
    time_df = pd.DataFrame(data=time_data, columns=column_labels)
for i, row in time_df.iterrows():
cur.execute(time_table_insert, list(row))
    # load user table, dropping duplicate rows so each user is inserted once per file
    user_df = df[['userId', 'firstName', 'lastName', 'gender', 'level']].drop_duplicates()
# insert user records
for i, row in user_df.iterrows():
        cur.execute(user_table_insert, list(row))
# insert songplay records
for index, row in df.iterrows():
# get songid and artistid from song and artist tables
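        # (the lookup matches the log row's song title, artist name, and duration)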
cur.execute(song_select, (row.song, row.artist, row.length))
results = cur.fetchone()
if results:
songid, artistid = results
else:
songid, artistid = None, None
# insert songplay record
songplay_data = (row.ts, row.userId, row.level, songid, artistid, row.sessionId, row.location, row.userAgent)
cur.execute(songplay_table_insert, songplay_data)


def process_data(cur, conn, filepath, func):
    """
    Walk the given directory, collect all JSON files found, and process
    each one with the function passed as a parameter, committing after
    each file.
    """
# get all files matching extension from directory
all_files = []
    for root, _, _ in os.walk(filepath):
        for f in glob.glob(os.path.join(root, '*.json')):
            all_files.append(os.path.abspath(f))
# get total number of files found
num_files = len(all_files)
print('{} files found in {}'.format(num_files, filepath))
# iterate over files and process
for i, datafile in enumerate(all_files, 1):
func(cur, datafile)
conn.commit()
print('{}/{} files processed.'.format(i, num_files))


def main():
    """
    Connect to the sparkifydb sink database.
    Process the song and log data.
    """
conn = psycopg2.connect("host=127.0.0.1 dbname=sparkifydb user=student password=student")
cur = conn.cursor()
process_data(cur, conn, filepath='data/song_data', func=process_song_file)
process_data(cur, conn, filepath='data/log_data', func=process_log_file)
conn.close()


if __name__ == "__main__":
main()