Introduction to Perl

  • Only three basic variable types:

    • Scalars (names start with $)
    • Arrays (names start with @)
    • Hashes (names start with %)
  • Basic Hello World program

#!/usr/bin/perl print "Hello World\n";

Variables in Perl

  • Scalar — the variable name must start with $
#!/usr/bin/perl
$employee_name = "Coherent";
$employee_age = "20";
$employee_salary = "3.14";
print "Name = $employee_name\n";
print "Age = $employee_age\n";
print "Salary = $employee_salary\n";

result:

Name = Coherent
Age = 20
Salary = 3.14

Perl scalars are untyped: the same $ variable can hold a string or a number, and Perl converts between the two automatically depending on how the value is used.
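A minimal sketch of this context-dependent behavior (the variable name here is just illustrative):

#!/usr/bin/perl
$value = "20";              # stored as a string
print $value + 5, "\n";     # numeric context -> 25
print $value . "5", "\n";   # string context  -> 205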

  • Array — the variable name must start with @. A single element is accessed as a scalar, so it is written with $ (for example $names[0]).
#!/usr/bin/perl
@names = ("Coherent", "Cheng119", "Jackman");
@ages = (20, 21, 22);
print "\$names[0] = $names[0]\n";
print "\$names[1] = $names[1]\n";
print "\$names[2] = $names[2]\n";
print "\$ages[0] = $ages[0]\n";
print "\$ages[1] = $ages[1]\n";
print "\$ages[2] = $ages[2]\n";

result:

$names[0] = Coherent
$names[1] = Cheng119
$names[2] = Jackman
$ages[0] = 20
$ages[1] = 21
$ages[2] = 22
  • Hash — the variable name must start with %. A hash stores key/value pairs, like a dict in Python.
#!/usr/bin/perl
%data = ('Coherent', 20, 'Cheng119', 21, 'Jackman', 22);
print "\$data{'Coherent'} = $data{'Coherent'}\n";
print "\$data{'Cheng119'} = $data{'Cheng119'}\n";
print "\$data{'Jackman'} = $data{'Jackman'}\n";

result

$data{'Coherent'} = 20
$data{'Cheng119'} = 21
$data{'Jackman'} = 22

String Operations

  • String concatenation
#!/usr/bin/perl
# string concatenation
$string1 = "Apple";
$string2 = "Pen";
$string_concat = $string1 . $string2;
print "$string_concat\n";

result

ApplePen
  • String multiplication
#!/usr/bin/perl
# string multiplication
$stringvar = "abc";
$stringvar_double = $stringvar * 2;
print "$stringvar_double\n";

result

0

Since "abc" does not start with a digit, Perl converts it to 0 in numeric context, so the multiplication yields 0 (with use warnings enabled Perl would also print an "isn't numeric" warning).

  • Alphanumeric string multiplication
#!/usr/bin/perl
# alphanumeric string multiplication
$string_numeric = "12p1";
$string_numeric_double = $string_numeric * 2;
print "$string_numeric_double\n";

result

24

Perl uses only the leading numeric part of the string ("12") and ignores the rest, so the result is 12 * 2 = 24.

  • String Increment
#!/usr/bin/perl
# string increment
$str1 = "abc";
$str1++;
print "$str1\n";
$str2 = "z";
$str2++;
print "$str2\n";

result

abd
aa
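The ++ operator on strings is Perl's "magic" increment: it carries to the next character like an odometer, which is why "z" becomes "aa". A small sketch of a few more cases (the values in the comments are what Perl produces):

#!/usr/bin/perl
$s = "Az";
$s++;
print "$s\n";   # Ba  (z wraps to a and carries into A -> B)
$s = "zz";
$s++;
print "$s\n";   # aaa (the carry adds a new character)
$s = "a9";
$s++;
print "$s\n";   # b0  (digits wrap from 9 to 0)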
  • String repeat
#!/usr/bin/perl
# string repeat
$str_repeat = "t" x 5;
print "$str_repeat\n";

result

ttttt

Lists

#!/usr/bin/perl
# Declaring a list
@names = ("Coherent", "Cheng119", "Jackman");
# Accessing a list element
print "$names[1]\n";
# Accessing the last element of the list
print "$names[-1]\n";
# Slicing a list
@chars = ('a', 'b', 'c', 'd', 'e', 'f', 'g', 'h');
# slice assignment copies the values, so changing @new_chars later does not affect @chars
@new_chars[0, 1, 2] = @chars[1, 3, 5];
$new_chars[0] = 'z';
print "$new_chars[0]\n";
print "$chars[1]\n";
print "@chars\n";
print "@new_chars\n";

result

Cheng119
Jackman
z
b
a b c d e f g h
z d f
#!/usr/bin/perl
# Something like linspace in numpy
@list1 = (1..10);
print "@list1\n";
# Part of the list using ".."
@list2 = (2, 5..9, 11);
print "@list2\n";
# List using floating point (the range operator truncates to integers)
@list3 = (2.1..6.3);
print "@list3\n";
# List using strings
@list4 = ('aa'..'ad');
print "@list4\n";
# sort numbers & strings (sort compares as strings by default)
@numbers = (9, 2, 8, 4, 1);
@names = ("Coherent", "Cheng119", "Jackman");
@sorted_numbers = sort @numbers;
print "@sorted_numbers\n";
@sorted_names = sort @names;
print "@sorted_names\n";
@descent_numbers = reverse sort @numbers;
print "@descent_numbers\n";
# Merge the elements of lists into a single string
$str1 = join(" ", "this", "is", "a", "string");
print "$str1\n";
$str2 = join("::", "this", "is", "a", "string");
print "$str2\n";
@ll = ("Here", "is", "a");
$str3 = join(" ", @ll, "string");
print "$str3\n";
# Split a string into array elements
$string_to_split = "words::separated::by::colons";
@arr = split(/::/, $string_to_split);
print "@arr\n";
# split using the empty pattern //
$chars = "abcdefghijk";
@char_arr = split(//, $chars);
print "@char_arr\n";

result

1 2 3 4 5 6 7 8 9 10
2 5 6 7 8 9 11
2 3 4 5 6
aa ab ac ad
1 2 4 8 9
Cheng119 Coherent Jackman
9 8 4 2 1
this is a string
this::is::a::string
Here is a string
words separated by colons
a b c d e f g h i j k
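The list examples above cover slicing, ranges, sort, join, and split; Perl also has builtins for adding and removing elements at either end of an array. A minimal sketch (the array name is illustrative):

#!/usr/bin/perl
@stack = (1, 2, 3);
push(@stack, 4);          # append to the end      -> 1 2 3 4
$last = pop(@stack);      # remove from the end    -> $last = 4
unshift(@stack, 0);       # prepend to the front   -> 0 1 2 3
$first = shift(@stack);   # remove from the front  -> $first = 0
print "@stack\n";         # 1 2 3
print "$first $last\n";   # 0 4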

Conditional Statements

  • if / elsif (note the spelling: there is no second 'e' in elsif)
  • switch / case, with else playing the role of default (requires the Switch module, which is no longer shipped with recent Perl versions and must be installed from CPAN)
  • an unless block is entered only when its condition is false
#!/usr/bin/perl
# if-else condition
$salary = 10500;
if($salary > 10000){
    print "Employee is Manager\n";
}
elsif($salary < 10000){
    print "Employee is a staff\n";
}

# Unless condition
# the block runs only when the condition is false
$a = 22;
unless($a < 20){
    print("a is greater than 20\n");
}

# Switch case
use Switch;
$var = 'key1';
@array = (10, 20, 30);
%hash = ('key1' => 10, 'key2' => 20);
switch($var){
    case 10          {print "number is 10\n"}
    case "a"         {print "string is a\n"}
    case [1..10, 42] {print "number is in dis-continuous list\n"}
    case (\@array)   {print "number is in array\n"}
    case (\%hash)    {print "enter in hash\n"}
    else             {print "not found in the cases\n"}
}

result

Employee is Manager
a is greater than 20
enter in hash
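One related detail: Perl uses separate operators for numeric and string comparison in conditions. A minimal sketch (the values are illustrative):

#!/usr/bin/perl
$n = 10;
$s = "abc";
if($n == 10)    { print "numeric comparison uses == and !=\n"; }
if($s eq "abc") { print "string comparison uses eq and ne\n"; }
# comparing a string with == treats it as a number (usually 0), which is rarely what you want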

Loops

  • while and for loops have almost the same structure as in C
  • until loop: keeps looping as long as the condition is false, and stops once it becomes true
  • foreach loop: iterates over the values of a list, similar to an iterator or range-based loop in C++
#!/usr/bin/perl
# While loop
$a = 10;
while($a < 20){
    print "Value of a: $a\n";
    $a = $a + 1;
}

# For loop
for($a = 10; $a < 20; $a++){
    print "Value of a: $a\n";
}

# Loop over an array by index
@names = ("A", "B", "C", "D", "E", "F", "G");
$size = @names;   # an array in scalar context gives its length
for($i = 0; $i < $size; $i++){
    print "$names[$i]\n";
}

# Until loop
# repeatedly execute the statement as long as the condition is false
$a = 5;
until($a > 10){
    print "Value of a: $a\n";
    $a++;
}

# Foreach loop
# iterate over the values instead of the indices of the array
@names = ("A", "B", "C", "D", "E", "F", "G");
foreach $name (@names){
    print "$name\n";
}
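Perl also has loop-control keywords not shown above: next skips to the next iteration and last exits the loop, much like continue and break in C. A minimal sketch:

#!/usr/bin/perl
foreach $i (1..10){
    next if $i % 2 == 0;   # skip even numbers (like "continue")
    last if $i > 7;        # leave the loop entirely (like "break")
    print "$i\n";          # prints 1 3 5 7
}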

File I/O

Given the file leaders.rpt:

leader country Phone price
trump America iphone4 600
modi India pixel3 500
boris Britain iphoneX 800
putin Russia HTC9 400
imran Pakstan nokia 20
jinping China huawei 350
frank Germany iphone6 700

We want to find the maximum price and which leader paid it.

#!/usr/bin/perl
use List::Util qw(min max);

@price = ();
open($fh, '<', "leaders.rpt") or die "Cannot open leaders.rpt: $!";
while($line = <$fh>){
    @data = split(' ', $line);
    # ignore the header line of the rpt file ("ne" is the string not-equal operator)
    if($data[-1] ne 'price'){
        # push the price into the array
        push(@price, $data[-1]);
        # also store a price -> name mapping in %leaders
        $name = $data[0];
        $leaders{$data[-1]} = $name;
    }
}
close($fh);
print "@price\n";
# get the max element of the array
$max_price = max @price;
print "$max_price\n";
print "$leaders{$max_price}\n";
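The example above only reads a file; for completeness, here is a minimal sketch of writing a file with the same three-argument open (the file name and contents are illustrative):

#!/usr/bin/perl
# '>' truncates and writes, '>>' appends
open($out, '>', "max_price.txt") or die "Cannot open max_price.txt: $!";
print $out "max price report\n";
close($out);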

Functions

  • To access the arguments inside the function, use the special array @_
  • The first argument to the function is in $_[0], the second is $_[1]
sub Average{
    # get the number of arguments passed in
    $n = scalar(@_);
    $sum = 0;
    foreach $item (@_){
        $sum += $item;
    }
    $average = $sum / $n;
    print "Average is : $average\n";
}
Average(10, 20, 30);

# A function that combines all of its arguments into one array
sub PrintMyList{
    # "my" gives the variable lexical (private) scope instead of global
    my @list = @_;
    print "Given list is @list\n";
}
$x = 10;
@y = (1, 2, 3, 4);
PrintMyList($x, @y);

result

Average is : 20
Given list is 10 1 2 3 4
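Instead of indexing @_ directly, arguments are often copied into named variables with my (or taken one at a time with shift). A minimal sketch (the function name and default value are illustrative):

sub Greet{
    my ($name, $greeting) = @_;                    # unpack arguments into named variables
    $greeting = "Hello" unless defined $greeting;  # simple default value
    print "$greeting, $name\n";
}
Greet("Coherent");          # Hello, Coherent
Greet("Cheng119", "Hi");    # Hi, Cheng119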

Hashes

  • Initialize a hash with the % sigil
  • Insert or look up a single key/value pair with the $ sigil
#!/usr/bin/perl
# init the hash (using %)
%leader = ('Modi', 'India', 'Trump', 'USA', 'Putin', 'Russia');
print "\$leader{'Modi'} = $leader{'Modi'}\n";
print "\$leader{'Trump'} = $leader{'Trump'}\n";
print "\$leader{'Putin'} = $leader{'Putin'}\n";
# insert into the hash (using $)
$leader{'Boris'} = 'Britain';
# get the keys & values of the hash
@names = keys %leader;
@country = values %leader;
print "@names\n";
print "@country\n";
# iterate through the hash
while(($key, $value) = each %leader){
    print "$key => $value\n";
}

result (note: hash ordering is not guaranteed, so the order of keys and values may differ between runs)

$leader{'Modi'} = India
$leader{'Trump'} = USA
$leader{'Putin'} = Russia
Trump Boris Putin Modi
USA Britain Russia India
Trump => USA
Boris => Britain
Putin => Russia
Modi => India
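Two other common hash operations are checking whether a key exists and deleting a key. A minimal sketch continuing with the %leader hash from above:

if(exists $leader{'Modi'}){
    print "Modi is in the hash\n";
}
delete $leader{'Boris'};            # remove a key/value pair
print scalar(keys %leader), "\n";   # number of keys left (3)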

Grep

  • Use grep with a regular expression to filter a list
  • $ anchors the pattern to the end of the string
  • ^ anchors the pattern to the start of the string
  • [a-z] matches one lowercase letter
  • \d matches one digit
  • !/pattern/ keeps the elements that do not match
@names = ('Coherent', 'Cheng', 'Jackman', 123, 456, 789);
# keep only the entries that end with a lowercase letter
@grepNames = grep(/[a-z]$/, @names);
print "@grepNames\n";
# keep only the entries that end with g
@grepNames = grep(/g$/, @names);
print "@grepNames\n";
# keep only the entries that start with C
@grepNames = grep(/^C/, @names);
print "@grepNames\n";
# keep only the entries that do "not" start with C
@grepNames = grep(!/^C/, @names);
print "@grepNames\n";
# keep only the entries that contain a digit
@grepNames = grep(/\d/, @names);
print "@grepNames\n";

result

Coherent Cheng Jackman
Cheng
Coherent Cheng
Jackman 123 456 789
123 456 789
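Besides regular expressions, grep also accepts an arbitrary block of code, with each element visible as $_; this is handy for numeric filters. A minimal sketch:

@numbers = (123, 456, 789);
@big = grep { $_ > 200 } @numbers;
print "@big\n";   # 456 789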

Pattern Matching

  • Match operator

    • $&: contains the entire matched string
    • $`: contains everything before the matched string
    • $': contains everything after the matched string
  • Substitution operator (replaces only the first match)

    • s/pattern/replacement/
  • Transliteration operator (replaces every occurrence)

    • tr/pattern/replacement/
# Split function
$line = "this is a sentence";
@array = split(/ /, $line);
print "@array\n";

# Match operator
# $&: contains the entire matched string
# $`: contains everything before the matched string
# $': contains everything after the matched string
$string = "perl tutorials by VLSI academy";
$string =~ m/by/;
print "Match: $&\n";    # by
print "Before: $`\n";   # perl tutorials
print "After: $'\n";    # VLSI academy

# Substitution operator (only the first match)
# s/pattern/replacement/;
$string = "VLSI Academy is university";
$string =~ s/university/online platform/;
print "$string\n";

# Transliteration operator (for all matches)
# tr/pattern/replacement/;
$string = "10001";
$string =~ s/0/9/;
print "$string\n";
$string =~ tr/0/9/;
print "$string\n";

result

this is a sentence
Match: by
Before: perl tutorials
After:  VLSI academy
VLSI Academy is online platform
19001
19991
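Since s/pattern/replacement/ only replaces the first match, the usual way to replace every occurrence of a full pattern is the /g (global) modifier. A minimal sketch:

$string = "10001";
$string =~ s/0/9/g;   # /g replaces all matches, like tr/// but with a real regex pattern
print "$string\n";    # 19991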

Data Extraction

Use a regular expression with capturing parentheses to pull the keyword out of the text. Remember the parentheses on both sides: the pattern needs (...) to capture the value, and the left-hand side needs (...) to force list context, otherwise the match only returns true/false.

#!/usr/bin/perl
# extract the date info (ex: 19.2.1896)
$text1 = "I was born in 19.2.1896 in Taiwan";
($date) = ($text1 =~ /(\d+\.\d+\.\d+)/);
print "$date\n";

# extract the 64.582295
$text2 = "The temperature in lab is 64.582295";
($temp) = ($text2 =~ /(\d{2}\.\d{6})/);
print "$temp\n";

result

19.2.1896
64.582295
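The same idea extends to several capture groups at once, with each group filling one variable on the left-hand side. A minimal sketch reusing the date string from above:

#!/usr/bin/perl
$text = "I was born in 19.2.1896 in Taiwan";
($day, $month, $year) = ($text =~ /(\d+)\.(\d+)\.(\d+)/);
print "day=$day month=$month year=$year\n";   # day=19 month=2 year=1896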
import sys
import re
import argparse
from PyQt5.QtWidgets import (QApplication, QWidget, QVBoxLayout, QHBoxLayout,
                             QLabel, QLineEdit, QPushButton, QTextEdit, QMessageBox)

def parse_logs(flow, step, metrics, mode):
    log_file = f"{flow}_{step}.log"
    data = {}
    try:
        with open(log_file, 'r') as file:
            for line in file:
                for metric in metrics:
                    if re.search(rf"\b{metric}\b", line):
                        match = re.search(r'(\w+)\s*:\s*(\d+)', line)
                        if match:
                            design, value = match.groups()
                            value = int(value)
                            if design not in data:
                                data[design] = []
                            data[design].append((metric, value))
    except FileNotFoundError:
        print(f"Log file for {flow} not found.")
    return data

def compare_metrics(data1, data2, metrics, mode):
    brief = {}
    compare = {"class1": [], "class2": [], "class3": [], "class4": []}
    for design in data1:
        for metric, value in data1[design]:
            if metric in metrics and (mode == "metric" or mode == "all metric"):
                if design not in brief:
                    brief[design] = []
                brief[design].append((metric, value, "flow1"))
    for design in data2:
        for metric, value in data2[design]:
            if metric in metrics and (mode == "metric" or mode == "all metric"):
                if design not in brief:
                    brief[design] = []
                brief[design].append((metric, value, "flow2"))
    for design in brief:
        flow1_metrics = [m[1] for m in brief[design] if m[2] == "flow1"]
        flow2_metrics = [m[1] for m in brief[design] if m[2] == "flow2"]
        if flow1_metrics and not flow2_metrics:
            compare["class1"].append(design)
        elif flow1_metrics and flow2_metrics:
            compare["class2"].append(design)
        elif flow1_metrics and flow2_metrics and flow1_metrics[0] < flow2_metrics[0]:
            compare["class3"].append(design)
        elif flow1_metrics and flow2_metrics and flow1_metrics[0] > flow2_metrics[0]:
            compare["class4"].append(design)
    return brief, compare

def print_summary(brief, compare, flow1, flow2, metrics, mode):
    result = []
    result.append("* Brief")
    result.append(f"{flow1} degrade by {metrics} according {mode}: {', '.join([d for d in brief if any(m[2] == 'flow1' for m in brief[d])])}")
    result.append(f"{flow2} degrade by {metrics} according {mode}: {', '.join([d for d in brief if any(m[2] == 'flow2' for m in brief[d])])}")
    result.append("\n* Compare")
    result.append(f"class 1: {flow1} degrade but {flow2} not degrade by {metrics} according {mode}: {', '.join(compare['class1'])}")
    result.append(f"class 2: {flow1} degrade and {flow2} degrade by {metrics} according {mode}: {', '.join(compare['class2'])}")
    result.append(f"class 3: {flow1} better than {flow2} by {metrics} according {mode}: {', '.join(compare['class3'])}")
    result.append(f"class 4: {flow2} better than {flow1} by {metrics} according {mode}: {', '.join(compare['class4'])}")
    return "\n".join(result)

def main_cli():
    parser = argparse.ArgumentParser(description="Log Viewer")
    parser.add_argument("flow1", type=str, help="Name of the first flow")
    parser.add_argument("flow2", type=str, help="Name of the second flow")
    parser.add_argument("step", type=str, help="Step name")
    parser.add_argument("metric", type=str, help="Metrics to parse, separated by |")
    parser.add_argument("mode", type=str, help="Mode (metric/all metric)")
    args = parser.parse_args()
    metrics = args.metric.split('|')
    data1 = parse_logs(args.flow1, args.step, metrics, args.mode)
    data2 = parse_logs(args.flow2, args.step, metrics, args.mode)
    brief, compare = compare_metrics(data1, data2, metrics, args.mode)
    summary = print_summary(brief, compare, args.flow1, args.flow2, metrics, args.mode)
    print(summary)

class LogViewerApp(QWidget):
    def __init__(self):
        super().__init__()
        self.initUI()

    def initUI(self):
        layout = QVBoxLayout()
        # Input fields
        self.flow1Input = QLineEdit(self)
        self.flow2Input = QLineEdit(self)
        self.stepInput = QLineEdit(self)
        self.metricInput = QLineEdit(self)
        self.modeInput = QLineEdit(self)
        layout.addWidget(QLabel('Flow 1:'))
        layout.addWidget(self.flow1Input)
        layout.addWidget(QLabel('Flow 2:'))
        layout.addWidget(self.flow2Input)
        layout.addWidget(QLabel('Step:'))
        layout.addWidget(self.stepInput)
        layout.addWidget(QLabel('Metric:'))
        layout.addWidget(self.metricInput)
        layout.addWidget(QLabel('Mode:'))
        layout.addWidget(self.modeInput)
        # Buttons
        btnLayout = QHBoxLayout()
        self.runButton = QPushButton('Run', self)
        self.runButton.clicked.connect(self.runAnalysis)
        btnLayout.addWidget(self.runButton)
        layout.addLayout(btnLayout)
        # Output area
        self.outputArea = QTextEdit(self)
        layout.addWidget(self.outputArea)
        self.setLayout(layout)
        self.setWindowTitle('Log Viewer')
        self.show()

    def runAnalysis(self):
        flow1 = self.flow1Input.text()
        flow2 = self.flow2Input.text()
        step = self.stepInput.text()
        metrics = self.metricInput.text().split('|')
        mode = self.modeInput.text()
        if not flow1 or not flow2 or not step or not metrics or not mode:
            QMessageBox.warning(self, "Input Error", "All fields must be filled out")
            return
        data1 = parse_logs(flow1, step, metrics, mode)
        data2 = parse_logs(flow2, step, metrics, mode)
        brief, compare = compare_metrics(data1, data2, metrics, mode)
        summary = print_summary(brief, compare, flow1, flow2, metrics, mode)
        self.outputArea.setText(summary)

def main_gui():
    app = QApplication(sys.argv)
    ex = LogViewerApp()
    sys.exit(app.exec_())

if __name__ == "__main__":
    if len(sys.argv) > 1:
        main_cli()
    else:
        main_gui()
tr_path = 'covid.train.csv'  # path to training data
tt_path = 'covid.test.csv'   # path to testing data

# PyTorch
import torch
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader

# For data preprocess
import numpy as np
import csv
import os

# For plotting
import matplotlib.pyplot as plt
from matplotlib.pyplot import figure

myseed = 42069  # set a random seed for reproducibility
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
np.random.seed(myseed)
torch.manual_seed(myseed)
if torch.cuda.is_available():
    torch.cuda.manual_seed_all(myseed)

def get_device():
    ''' Get device (if GPU is available, use GPU) '''
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    print("[Device]: " + device)
    return device

def plot_learning_curve(loss_record, title=''):
    ''' Plot learning curve of your DNN (train & dev loss) '''
    total_steps = len(loss_record['train'])
    x_1 = range(total_steps)
    x_2 = x_1[::len(loss_record['train']) // len(loss_record['dev'])]
    figure(figsize=(6, 4))
    plt.plot(x_1, loss_record['train'], c='tab:red', label='train')
    plt.plot(x_2, loss_record['dev'], c='tab:cyan', label='dev')
    plt.ylim(0.0, 5.)
    plt.xlabel('Training steps')
    plt.ylabel('MSE loss')
    plt.title('Learning curve of {}'.format(title))
    plt.legend()
    plt.show()
    plt.savefig("Learning_Curve.png")

def plot_pred(dv_set, model, device, lim=35., preds=None, targets=None):
    ''' Plot prediction of your DNN '''
    if preds is None or targets is None:
        model.eval()
        preds, targets = [], []
        for x, y in dv_set:
            x, y = x.to(device), y.to(device)
            with torch.no_grad():
                pred = model(x)
                preds.append(pred.detach().cpu())
                targets.append(y.detach().cpu())
        preds = torch.cat(preds, dim=0).numpy()
        targets = torch.cat(targets, dim=0).numpy()
    figure(figsize=(5, 5))
    plt.scatter(targets, preds, c='r', alpha=0.5)
    plt.plot([-0.2, lim], [-0.2, lim], c='b')
    plt.xlim(-0.2, lim)
    plt.ylim(-0.2, lim)
    plt.xlabel('ground truth value')
    plt.ylabel('predicted value')
    plt.title('Ground Truth v.s. Prediction')
    plt.show()
    plt.savefig("Pred.png")

class COVID19Dataset(Dataset):
    ''' Dataset for loading and preprocessing the COVID19 dataset '''
    def __init__(self, path, mode='train', target_only=False):
        self.mode = mode
        # Read data into numpy arrays
        with open(path, 'r') as fp:
            data = list(csv.reader(fp))
            data = np.array(data[1:])[:, 1:].astype(float)
        if not target_only:
            feats = list(range(93))
        else:
            feats = [57, 75]
            data = data[:, feats]
            # TODO: Using 40 states & 2 tested_positive features (indices = 57 & 75)
        if mode == 'test':
            # Testing data
            # data: 893 x 93 (40 states + day 1 (18) + day 2 (18) + day 3 (17))
            data = data[:, feats]
            self.data = torch.FloatTensor(data)
        else:
            # Training data (train/dev sets)
            # data: 2700 x 94 (40 states + day 1 (18) + day 2 (18) + day 3 (18))
            target = data[:, -1]
            data = data[:, feats]
            # Splitting training data into train & dev sets
            if mode == 'train':
                indices = [i for i in range(len(data)) if i % 10 != 0]
            elif mode == 'dev':
                indices = [i for i in range(len(data)) if i % 10 == 0]
            # Convert data into PyTorch tensors
            self.data = torch.FloatTensor(data[indices])
            self.target = torch.FloatTensor(target[indices])
        # Normalize features (you may remove this part to see what will happen)
        self.data[:, 40:] = \
            (self.data[:, 40:] - self.data[:, 40:].mean(dim=0, keepdim=True)) \
            / self.data[:, 40:].std(dim=0, keepdim=True)
        self.dim = self.data.shape[1]
        print('Finished reading the {} set of COVID19 Dataset ({} samples found, each dim = {})'
              .format(mode, len(self.data), self.dim))

    def __getitem__(self, index):
        # Returns one sample at a time
        if self.mode in ['train', 'dev']:
            # For training
            return self.data[index], self.target[index]
        else:
            # For testing (no target)
            return self.data[index]

    def __len__(self):
        # Returns the size of the dataset
        return len(self.data)

def prep_dataloader(path, mode, batch_size, n_jobs=0, target_only=False):
    ''' Generates a dataset, then is put into a dataloader. '''
    dataset = COVID19Dataset(path, mode=mode, target_only=target_only)  # Construct dataset
    dataloader = DataLoader(
        dataset, batch_size,
        shuffle=(mode == 'train'), drop_last=False,
        num_workers=n_jobs, pin_memory=True)  # Construct dataloader
    return dataloader

class NeuralNet(nn.Module):
    ''' A simple fully-connected deep neural network '''
    def __init__(self, input_dim):
        super(NeuralNet, self).__init__()
        # Define your neural network here
        # TODO: How to modify this model to achieve better performance?
        self.net = nn.Sequential(
            nn.Linear(input_dim, 16),
            nn.BatchNorm1d(16),
            nn.Dropout(0.1),
            nn.ReLU(),
            nn.Linear(16, 1)
        )
        # Mean squared error loss
        self.criterion = nn.MSELoss(reduction='mean')

    def forward(self, x):
        ''' Given input of size (batch_size x input_dim), compute output of the network '''
        return self.net(x).squeeze(1)

    def cal_loss(self, pred, target):
        ''' Calculate loss '''
        # TODO: you may implement L1/L2 regularization here
        regularization_loss = 0
        for param in self.parameters():
            regularization_loss += torch.sum(param ** 2)
        return self.criterion(pred, target) + 0.00075 * regularization_loss

def train(tr_set, dv_set, model, config, device):
    ''' DNN training '''
    n_epochs = config['n_epochs']  # Maximum number of epochs
    # Setup optimizer
    optimizer = getattr(torch.optim, config['optimizer'])(
        model.parameters(), **config['optim_hparas'])
    min_mse = 1000.
    loss_record = {'train': [], 'dev': []}  # for recording training loss
    early_stop_cnt = 0
    epoch = 0
    while epoch < n_epochs:
        model.train()                            # set model to training mode
        for x, y in tr_set:                      # iterate through the dataloader
            optimizer.zero_grad()                # set gradient to zero
            x, y = x.to(device), y.to(device)    # move data to device (cpu/cuda)
            pred = model(x)                      # forward pass (compute output)
            mse_loss = model.cal_loss(pred, y)   # compute loss
            mse_loss.backward()                  # compute gradient (backpropagation)
            optimizer.step()                     # update model with optimizer
            loss_record['train'].append(mse_loss.detach().cpu().item())
        # After each epoch, test your model on the validation (development) set.
        dev_mse = dev(dv_set, model, device)
        if dev_mse < min_mse:
            # Save model if your model improved
            min_mse = dev_mse
            print('Saving model (epoch = {:4d}, loss = {:.4f})'
                  .format(epoch + 1, min_mse))
            torch.save(model.state_dict(), config['save_path'])  # Save model to specified path
            early_stop_cnt = 0
        else:
            early_stop_cnt += 1
        epoch += 1
        loss_record['dev'].append(dev_mse)
        if early_stop_cnt > config['early_stop']:
            # Stop training if your model stops improving for "config['early_stop']" epochs.
            break
    print('Finished training after {} epochs'.format(epoch))
    return min_mse, loss_record

def dev(dv_set, model, device):
    model.eval()                                # set model to evaluation mode
    total_loss = 0
    for x, y in dv_set:                         # iterate through the dataloader
        x, y = x.to(device), y.to(device)       # move data to device (cpu/cuda)
        with torch.no_grad():                   # disable gradient calculation
            pred = model(x)                     # forward pass (compute output)
            mse_loss = model.cal_loss(pred, y)  # compute loss
        total_loss += mse_loss.detach().cpu().item() * len(x)  # accumulate loss
    total_loss = total_loss / len(dv_set.dataset)               # compute averaged loss
    return total_loss

def test(tt_set, model, device):
    model.eval()                                # set model to evaluation mode
    preds = []
    for x in tt_set:                            # iterate through the dataloader
        x = x.to(device)                        # move data to device (cpu/cuda)
        with torch.no_grad():                   # disable gradient calculation
            pred = model(x)                     # forward pass (compute output)
            preds.append(pred.detach().cpu())   # collect prediction
    preds = torch.cat(preds, dim=0).numpy()     # concatenate all predictions and convert to a numpy array
    return preds

device = get_device()  # get the current available device ('cpu' or 'cuda')
print(device)
os.makedirs('models', exist_ok=True)  # The trained model will be saved to ./models/
target_only = False  # TODO: Using 40 states & 2 tested_positive features

# TODO: How to tune these hyper-parameters to improve your model's performance?
config = {
    'n_epochs': 3000,                # maximum number of epochs
    'batch_size': 270,               # mini-batch size for dataloader
    'optimizer': 'SGD',              # optimization algorithm (optimizer in torch.optim)
    'optim_hparas': {                # hyper-parameters for the optimizer (depends on which optimizer you are using)
        'lr': 0.001,                 # learning rate of SGD
        'momentum': 0.9              # momentum for SGD
    },
    'early_stop': 200,               # early stopping epochs (the number of epochs since your model's last improvement)
    'save_path': 'models/model.pth'  # your model will be saved here
}

tr_set = prep_dataloader(tr_path, 'train', config['batch_size'], target_only=target_only)
dv_set = prep_dataloader(tr_path, 'dev', config['batch_size'], target_only=target_only)
tt_set = prep_dataloader(tt_path, 'test', config['batch_size'], target_only=target_only)

model = NeuralNet(tr_set.dataset.dim).to(device)  # Construct model and move to device
model_loss, model_loss_record = train(tr_set, dv_set, model, config, device)

plot_learning_curve(model_loss_record, title='deep model')

del model
model = NeuralNet(tr_set.dataset.dim).to(device)
ckpt = torch.load(config['save_path'], map_location='cpu')  # Load your best model
model.load_state_dict(ckpt)
plot_pred(dv_set, model, device)  # Show prediction on the validation set

def save_pred(preds, file):
    ''' Save predictions to specified file '''
    print('Saving results to {}'.format(file))
    with open(file, 'w') as fp:
        writer = csv.writer(fp)
        writer.writerow(['id', 'tested_positive'])
        for i, p in enumerate(preds):
            writer.writerow([i, p])

preds = test(tt_set, model, device)  # predict COVID-19 cases with your model
save_pred(preds, 'pred.csv')         # save prediction file to pred.csv