# template.py
try:
    import ast, sys, re, os, json
    from ast import literal_eval
    from datetime import datetime
    import boto3
    from pyspark.sql import SparkSession
    from pyspark import SparkConf, SparkContext
    from dataclasses import dataclass
except Exception as e:
    # Fail fast instead of silently continuing with missing dependencies
    raise
class AWSS3(object):
    """Helper class that adds functionality on top of the boto3 S3 client."""

    def __init__(self, bucket):
        self.BucketName = bucket
        self.client = boto3.client("s3")

    def put_files(self, Response=None, Key=None):
        """
        Put the file on S3.
        :return: str ("ok" on success)
        """
        try:
            response = self.client.put_object(
                Body=Response, Bucket=self.BucketName, Key=Key
            )
            return "ok"
        except Exception as e:
            raise Exception("Error : {} ".format(e))

    def item_exists(self, Key):
        """Given a key, check whether the item exists on AWS S3."""
        try:
            response_new = self.client.get_object(Bucket=self.BucketName, Key=str(Key))
            return True
        except Exception as e:
            return False

    def get_item(self, Key):
        """Get the bytes data from AWS S3."""
        try:
            response_new = self.client.get_object(Bucket=self.BucketName, Key=str(Key))
            return response_new["Body"].read()
        except Exception as e:
            print("Error :{}".format(e))
            return False

    def find_one_update(self, data=None, key=None):
        """
        If the key exists on S3, return the data stored there;
        otherwise store the given data on S3 and return it.
        """
        flag = self.item_exists(Key=key)
        if flag:
            data = self.get_item(Key=key)
            return data
        else:
            self.put_files(Key=key, Response=data)
            return data

    def delete_object(self, Key):
        response = self.client.delete_object(Bucket=self.BucketName, Key=Key)
        return response

    def get_all_keys(self, Prefix=""):
        """
        :param Prefix: Prefix string
        :return: Keys List
        """
        try:
            paginator = self.client.get_paginator("list_objects_v2")
            pages = paginator.paginate(Bucket=self.BucketName, Prefix=Prefix)
            tmp = []
            for page in pages:
                for obj in page["Contents"]:
                    tmp.append(obj["Key"])
            return tmp
        except Exception as e:
            return []

    def print_tree(self):
        keys = self.get_all_keys()
        for key in keys:
            print(key)
        return None

    def find_one_similar_key(self, searchTerm=""):
        keys = self.get_all_keys()
        return [key for key in keys if re.search(searchTerm, key)]

    def __repr__(self):
        return "AWS S3 Helper class"
@dataclass
class HUDISettings:
    """Settings that identify a Hudi table: its name and its storage path."""

    table_name: str
    path: str
class HUDIIncrementalReader(AWSS3):
    def __init__(self, bucket, hudi_settings, spark_session):
        AWSS3.__init__(self, bucket=bucket)
        if type(hudi_settings).__name__ != "HUDISettings":
            raise Exception("please pass correct settings ")
        self.hudi_settings = hudi_settings
        self.spark = spark_session

    def __check_meta_data_file(self):
        """
        Check whether the metadata (checkpoint) file for the table exists.
        :return: Bool
        """
        file_name = f"metadata/{self.hudi_settings.table_name}.json"
        return self.item_exists(Key=file_name)

    def __read_meta_data(self):
        file_name = f"metadata/{self.hudi_settings.table_name}.json"
        return ast.literal_eval(self.get_item(Key=file_name).decode("utf-8"))

    def __push_meta_data(self, json_data):
        # json_data is already a JSON string; json.dumps wraps it once more,
        # which is matched by the extra json.loads in __run()
        file_name = f"metadata/{self.hudi_settings.table_name}.json"
        self.put_files(
            Key=file_name, Response=json.dumps(json_data)
        )

    def clean_check_point(self):
        """Delete the checkpoint file so the next read starts from the first commit."""
        file_name = f"metadata/{self.hudi_settings.table_name}.json"
        self.delete_object(Key=file_name)

    def __get_begin_commit(self):
        self.spark.read.format("hudi").load(self.hudi_settings.path).createOrReplaceTempView("hudi_snapshot")
        commits = list(map(lambda row: row[0], self.spark.sql(
            "select distinct(_hoodie_commit_time) as commitTime from hudi_snapshot order by commitTime asc").limit(
            50).collect()))
        # begin from the very first commit
        begin_time = int(commits[0]) - 1
        return begin_time

    def __read_inc_data(self, commit_time):
        incremental_read_options = {
            'hoodie.datasource.query.type': 'incremental',
            'hoodie.datasource.read.begin.instanttime': commit_time,
        }
        self.spark.read.format("hudi").options(**incremental_read_options).load(
            self.hudi_settings.path).createOrReplaceTempView("hudi_incremental")
        df = self.spark.sql("select * from hudi_incremental")
        return df

    def __get_last_commit(self):
        commits = list(map(lambda row: row[0], self.spark.sql(
            "select distinct(_hoodie_commit_time) as commitTime from hudi_incremental order by commitTime asc").limit(
            50).collect()))
        last_commit = commits[-1]
        return last_commit

    def __run(self):
        # Check whether the checkpoint (metadata) file exists
        flag = self.__check_meta_data_file()

        # If it exists, load the last processed commit and read incrementally from there
        if flag:
            # the checkpoint payload is stored double-encoded, so one more json.loads is needed
            meta_data = json.loads(self.__read_meta_data())
            print(f"""
            ******************LOGS******************
            meta_data {meta_data}
            last_processed_commit : {meta_data.get("last_processed_commit")}
            ***************************************
            """)

            read_commit = str(meta_data.get("last_processed_commit"))
            df = self.__read_inc_data(commit_time=read_commit)

            # If there is no incremental data, the DataFrame is empty and the checkpoint is left untouched
            if not df.rdd.isEmpty():
                last_commit = self.__get_last_commit()
                self.__push_meta_data(json_data=json.dumps({
                    "last_processed_commit": last_commit,
                    "table_name": self.hudi_settings.table_name,
                    "path": self.hudi_settings.path,
                    "inserted_time": datetime.now().__str__(),
                }))
                return df
            else:
                return df
        else:
            # The checkpoint file does not exist yet: create it on S3
            # and start reading from the beginning commit
            read_commit = self.__get_begin_commit()
            df = self.__read_inc_data(commit_time=read_commit)
            last_commit = self.__get_last_commit()
            self.__push_meta_data(json_data=json.dumps({
                "last_processed_commit": last_commit,
                "table_name": self.hudi_settings.table_name,
                "path": self.hudi_settings.path,
                "inserted_time": datetime.now().__str__(),
            }))
            return df

    def read(self):
        """
        Read the incremental data and return a Spark DataFrame.
        :return: DataFrame
        """
        return self.__run()
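
# Checkpoint layout (derived from __push_meta_data above): the reader keeps a
# small JSON document at s3://<bucket>/metadata/<table_name>.json. The values
# below are hypothetical; real commit times come from _hoodie_commit_time.
#
#   {
#       "last_processed_commit": "20230401123045678",
#       "table_name": "orders",
#       "path": "s3://my-datalake/hudi/orders/",
#       "inserted_time": "2023-04-01 12:31:02.123456"
#   }
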
def main():
    bucket = 'BUCKET THAT WILL BE USED FOR CHECKPOINT PURPOSES'
    table_name = "<TABLE NAME GOES HERE>"
    path = "PATH TO HUDI DATALAKE"

    spark = SparkSession.builder \
        .config('spark.serializer', 'org.apache.spark.serializer.KryoSerializer') \
        .config('spark.sql.extensions', 'org.apache.spark.sql.hudi.HoodieSparkSessionExtension') \
        .config('className', 'org.apache.hudi') \
        .config('spark.sql.hive.convertMetastoreParquet', 'false') \
        .getOrCreate()

    helper = HUDIIncrementalReader(
        bucket=bucket,
        hudi_settings=HUDISettings(
            table_name=table_name,
            path=path),
        spark_session=spark
    )
    df = helper.read()  # df holds the incremental batch; downstream processing goes here


if __name__ == "__main__":
    main()
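
# Running the script (a hedged sketch, not from the original file): the Hudi
# Spark bundle must be on the classpath. One common way is spark-submit with
# --packages; the bundle artifact and version below are assumptions, pick the
# ones matching your Spark and Hudi versions:
#
#   spark-submit \
#       --packages org.apache.hudi:hudi-spark3.3-bundle_2.12:0.14.0 \
#       template.py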