# Create a small Dataproc cluster (1 master + 2 workers, n1-standard-4) with Jupyter and the Component Gateway enabled
!gcloud dataproc clusters create ex-dataproc --enable-component-gateway --region us-central1 --zone us-central1-c --master-machine-type n1-standard-4 --master-boot-disk-size 500 --num-workers 2 --worker-machine-type n1-standard-4 --worker-boot-disk-size 500 --image-version 2.0-debian10 --optional-components JUPYTER --project dataproc-334718
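Before moving on, it is worth confirming the cluster actually came up; listing clusters in the same region and project should show ex-dataproc with status RUNNING:
# Sanity check: ex-dataproc should appear as RUNNING
!gcloud dataproc clusters list --region us-central1 --project dataproc-334718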
# Open a SOCKS5 proxy on local port 1080 through an SSH tunnel to the master node (-D 1080); -N keeps the session open without running a remote command
!gcloud compute ssh ex-dataproc-m --project=dataproc-334718 --zone=us-central1-c -- -D 1080 -N
"/Applications/Google Chrome.app/Contents/MacOS/Google Chrome" --proxy-server="socks5://localhost:1080" --user-data-dir="/tmp/ex-dataproc-m" http://ex-dataproc-m:8088
# Create a GCS bucket in the project with uniform bucket-level access (-b on)
!gsutil mb -p dataproc-334718 -b on gs://ex-dataproc-bucket
# Upload the disaster-tweets dataset to the bucket
!gsutil cp nlpDisasterTweets.csv gs://ex-dataproc-bucket
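A quick listing confirms the upload landed (-l includes object sizes):
!gsutil ls -l gs://ex-dataproc-bucket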
# Read the CSV from GCS: header='true' uses the first row as column names,
# inferSchema='true' scans the data to type the columns, and multiLine=True
# handles newlines inside quoted tweet text
data = spark.read.format('csv').options(header='true', inferSchema='true', multiLine=True).load("gs://ex-dataproc-bucket/nlpDisasterTweets.csv")
print('Number of rows in data:', data.count())
data.show(5)
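Since the schema is inferred rather than declared, it is worth checking what Spark actually settled on before doing anything downstream; printSchema shows each column with its inferred type:
# Inspect the inferred column names and types
data.printSchema()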