Cross notes:
===

http://www.openkb.info/2015/01/scala-on-spark-cheatsheet.html
https://www.datacamp.com/community/tutorials/python-data-profiling

```python
import logging

import cx_Oracle
import pandas as pd

import Config as config


class OracleDBConnector:
    @classmethod
    def get_oracle_connection(cls, query, mode):
        """Run a query against Oracle. Returns a DataFrame for mode "read";
        otherwise executes and commits the statement and returns an empty DataFrame."""
        logging.info("Executing ----- %s", query)
        df = pd.DataFrame()
        connect_string = '{}/{}@{}:{}/{}'.format(
            config.ora_user, config.ora_pwd,
            config.ora_host, config.ora_port, config.ora_sid)
        connection = None
        try:
            connection = cx_Oracle.connect(connect_string)
            if mode == "read":
                # SELECTs: load the result set straight into a DataFrame
                df = pd.read_sql(query, con=connection)
            else:
                # DML/DDL (insert, create, ...): execute and commit
                cursor = connection.cursor()
                cursor.execute(query)
                connection.commit()
                cursor.close()
        except cx_Oracle.DatabaseError as e:
            if connection:
                connection.rollback()
            logging.error("There is a problem with sql: %s", e)
        finally:
            if connection:
                connection.close()
        return df
```

For reading, call it like this:

```python
df_cnt = OracleDBConnector.get_oracle_connection(cnt_query, "read")
```

For any other operation (insert, create, ...):

```python
OracleDBConnector.get_oracle_connection(insrt_query, "insert")
OracleDBConnector.get_oracle_connection(query, "create")
```

Join Zoom Meeting
https://us04web.zoom.us/j/78308869527?pwd=eDZNMlVzNHdwUzNtOEk3SHgwZFBBdz09
Meeting ID: 783 0886 9527
Passcode: kryav0

Parameters for the attribute build Spark job:

```
input_period_id=211115
input_attribute_base_dir=/spgdev-attribute/data/attribute/data/
input_executor_memory=2G
input_driver_memory=1G
input_step_number=1
input_executor_cores=2
input_class=com.epsilon.hadoop.reference.build.attribute.CreateAttributeFilespark
input_pii_base_dir=/spgdev-pii/data/pii/data/
input_num_executors=2
input_mapping_base_dir=/spgdev-mapping/data/mapping/data/
input_conf_memory_overhead=spark.executor.memoryOverhead=1G
```

```
-bash-4.2$ nohup ./run_build_ref_file_job.groovy &
```
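The `input_*` values above line up with standard `spark-submit` options. As a rough sketch only, assuming the job ultimately goes through `spark-submit` on YARN, and with the jar name and the order of the trailing application arguments invented here as placeholders, the equivalent hand-run command would look something like:

```bash
# Hypothetical spark-submit assembled from the input_* values above.
# The jar name (attribute-build.jar), master/deploy mode, and the order of the
# trailing application arguments are assumptions, not taken from the notes.
spark-submit \
  --class com.epsilon.hadoop.reference.build.attribute.CreateAttributeFilespark \
  --master yarn \
  --deploy-mode cluster \
  --num-executors 2 \
  --executor-cores 2 \
  --executor-memory 2G \
  --driver-memory 1G \
  --conf spark.executor.memoryOverhead=1G \
  attribute-build.jar \
  211115 \
  /spgdev-attribute/data/attribute/data/ \
  /spgdev-pii/data/pii/data/ \
  /spgdev-mapping/data/mapping/data/ \
  1
```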
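The nohup'd Groovy runner presumably assembles something like this itself; the sketch is only a reference for which spark-submit flag each `input_*` knob feeds.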