# 11 May - 15 May 2020

###### tags: `Study Note`

## Monday, 11 May 2020

The "converter" is now able to generate the index file. The code quality is not good enough yet; I will focus on finishing the functionality first, then the code quality.

## Tuesday, 12 May 2020

Text conversion is finished; I will implement the images tomorrow.

Problems:

![](https://i.imgur.com/VY4vdPI.png)

I will try to reduce the warnings.

## Wednesday, 13 May 2020

Added the image attachment function to the Python code.

## Thursday, 14 May 2020

- Meeting with Zhimin
- Code:

:::spoiler
```
import re

f = open('O-RAN_FH_Lib_SAS.rst', encoding="utf8")

# Collect the table-of-contents entries
contents_lock = False
content_list = []
h1_count = 0
h2_count = 0
h3_count = 0
appendix_count = 0
line_no = 1
while True:
    line_no += 1
    lines = f.readline()
    if lines != '\n':
        if lines == "Figures\n" or lines == "":   # stop at the "Figures" list (or at EOF)
            break
        print(line_no)
        lines = lines.rstrip()       # delete the trailing newline
        words = lines.split(' ')     # split to check whether the line is a contents entry
        print(words[0])

        # skip everything before the "Contents" heading
        if not contents_lock:
            if words[0] == "Contents":
                contents_lock = True
                print("done")        # Contents heading found
            continue

        sections = words[0].split('.')

        # check if it is the appendix heading (treated as h1)
        appendix_rules = [words[0] == "Appendix"]
        if all(appendix_rules):
            h1_count += 1
            content_list.append([h1_count, 0, 0, " ".join(words[0:-1])])
            continue

        # check if it is h1, e.g. "3.0 Title <page>"
        h1_rules = [len(sections) == 2,
                    sections[0].isdecimal(),
                    sections[1].isdecimal(),
                    sections[1] == "0"]
        if all(h1_rules):
            print("Heading 1")
            print("CL = ", len(content_list))
            h1_count += 1
            content_list.append([h1_count, 0, 0, " ".join(words[1:-1])])
            h2_count = 0             # clear h2 counts
            continue

        # check if it is an appendix sub-heading, e.g. "A.1 Title <page>"
        append_sub_rules = [len(sections) == 2,
                            sections[0] == "A",
                            sections[1].isdecimal()]
        if all(append_sub_rules):
            appendix_count += 1
            appendix_sub_count = 0
            content_list.append([h1_count, appendix_count, 0, " ".join(words[1:-1])])
            continue

        # check if it is h2, e.g. "3.1 Title <page>"
        h2_rules = [len(sections) == 2,
                    sections[0].isdecimal(),
                    sections[1].isdecimal(),
                    sections[1] != "0"]
        if all(h2_rules):
            print("Heading 2")
            h2_count += 1
            content_list.append([h1_count, h2_count, 0, " ".join(words[1:-1])])
            h3_count = 0             # clear h3 counts
            continue

        # check if it is h3, e.g. "3.1.2 Title <page>"
        h3_rules = [len(sections) == 3,
                    sections[0].isdecimal(),
                    sections[1].isdecimal(),
                    sections[1] != "0",
                    sections[2].isdecimal()]
        if all(h3_rules):
            print("Heading 3")
            h3_count += 1
            content_list.append([h1_count, h2_count, h3_count, " ".join(words[1:-1])])
            continue

        # check if it is an appendix sub-sub-heading, e.g. "A.1.2 Title <page>"
        append_sub_sub_rules = [len(sections) == 3,
                                sections[0] == "A",
                                sections[1].isdecimal(),
                                sections[2].isdecimal()]
        if all(append_sub_sub_rules):
            appendix_sub_count += 1
            content_list.append([h1_count, appendix_count, appendix_sub_count, " ".join(words[1:-1])])
            continue

content_list.append([0, 0, 0, "ENDOFFILE"])   # sentinel so the generator always has a "next" heading
for i in range(len(content_list)):
    print(content_list[i])
# Finished collecting the contents

f.seek(0)   # Reset the file to the beginning

# Generate the index file
scroll_to = content_list[0][3]
index_format = ["Contents", "Revision History"]
file_header = """.. Copyright (c) 2019 Intel
..
.. Licensed under the Apache License, Version 2.0 (the "License");
.. you may not use this file except in compliance with the License.
.. You may obtain a copy of the License at
..
..      http://www.apache.org/licenses/LICENSE-2.0
..
.. Unless required by applicable law or agreed to in writing, software
.. distributed under the License is distributed on an "AS IS" BASIS,
.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
.. See the License for the specific language governing permissions and
.. limitations under the License.
""" new_file = open("index.rst", "w", encoding="utf-8") new_file.write(file_header) lines = f.readline() new_file.write(lines) new_file.write("=" * (len(lines)-1)+"\n") lines = f.readline() while(lines != index_format[0]+"\n"): if(lines == "\n"): lines = f.readline() continue new_file.write("| " +lines) lines = f.readline() # Contents new_file.write(""".. toctree:: :maxdepth: 2 :caption: Contents: """) for i in range(len(content_list)): if((content_list[i][1] == 0) and (content_list[i][2] == 0)): new_file.write(" "+content_list[i][3].replace(' ', '-').replace(',', '')+".rst\n") # Others headings next_format = index_format[1] lines = f.readline() skip_line = True while lines != "Introduction\n": if skip_line: if lines == "Revision History\n": new_file.write("\n\n" + lines) new_file.write("-" * (len(lines)-1)+"\n") skip_line = False lines = f.readline() continue new_file.write(lines) lines = f.readline() while lines == "\n": lines = f.readline() # Generate the rest files next_h1 = "" next_h2 = "" list_h1 = [] for i in range(len(content_list)): if((content_list[i][1] == 0) and (content_list[i][2] == 0)): list_h1.append(i) print(content_list[i][3]) print(list_h1) i = 0; appendix_num = 1 appendix_section = False while(lines != ""): if lines == "-\n": lines = f.readline() new_file.write("\n\n") elif(lines == content_list[list_h1[i]][3]+"\n"): print(content_list[list_h1[i]][3]) # close file new_file.close() del new_file new_file = 0 file_name = content_list[list_h1[i]][3].replace(' ', '-').replace(',', '')+".rst" print(file_name) new_file = open(file_name, "w", encoding="utf-8") new_file.write(file_header) new_file.write(lines) elif(lines == "="*len(content_list[list_h1[i]][3])+"\n"): new_file.write(lines) new_file.write(""" .. contents:: :depth: 3 :local: """) # add iteration i = i+1 print("next: ", content_list[list_h1[i]][3]) elif "Appendix "+ str(appendix_num) + " " + processed_lines == content_list[list_h1[i]][3]: new_file.close() del new_file new_file = 0 file_name = ("Appendix "+ str(appendix_num) + " " + processed_lines).replace(' ', '-').replace(',', '')+".rst" new_file = open(file_name, "w", encoding="utf-8") new_file.write(file_header) new_file.write(processed_lines+"\n") new_file.write("="*len(processed_lines)+"\n") appendix_section = True appendix_space = [] for j in range(list_h1[i]+1, len(content_list)): if content_list[j][1] > 0 and content_list[j][2] == 0: appendix_space.append(j) appendix_iter = 0 i = i+1 appendix_num += 1 elif appendix_section == True: if(processed_lines == content_list[appendix_space[appendix_iter]][3]): new_file.write("A."+str(content_list[appendix_space[appendix_iter]][1])+" "+processed_lines+"\n") new_file.write("-"*len("A."+str(content_list[list_h1[i]][1])+" "+processed_lines)) appendix_iter += 1 if len(appendix_space) == appendix_iter: appendix_section = False else: if re.match(r"Figure [0-9]*[.] +", lines): image_path = "images/" + re.sub(r"[\s\/\\]","-",re.sub(r"Figure [0-9]*[.] ", "", lines).strip())+".jpg" new_file.write(".. image:: " + image_path + "\n") new_file.write(" :width: 400\n") new_file.write(" :alt: " + lines + "\n") print(lines) new_file.write(lines.strip() + "\n") else: if re.match(r"Figure [0-9]*[.] +", lines): image_path = "images/" + re.sub(r"[\s\/\\]","-",re.sub(r"Figure [0-9]*[.] ", "", lines).strip())+".jpg" new_file.write(".. 
image:: " + image_path + "\n") new_file.write(" :width: 600\n") new_file.write(" :alt: " + lines + "\n") print(lines) new_file.write(lines.strip() + "\n") lines = f.readline() processed_lines = " ".join(lines.split()[1:]) new_file.close() ``` ::: ## Friday, 15 May 2020 try to convert the FAPI document