Practice Problems: House Robber (LeetCode 198)
class Solution {
public:
    int rob(vector<int>& nums) {
        int len = nums.size();
        if (len == 1) {
            return nums[0];
        }
        // dp[i] = maximum amount robbable from the first i + 1 houses
        vector<int> dp(len, 0);
        dp[0] = nums[0];
        dp[1] = max(dp[0], nums[1]);
        for (int i = 2; i < len; i++) {
            // either rob house i (and skip house i - 1) or keep the previous best
            dp[i] = max(dp[i - 2] + nums[i], dp[i - 1]);
        }
        return dp[len - 1];
    }
};
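Since each state depends only on the two previous ones, the table can be dropped entirely. A minimal sketch of the same recurrence with O(1) extra space (untested, same interface as above):

class Solution {
public:
    int rob(vector<int>& nums) {
        int prev2 = 0, prev1 = 0; // best totals up to houses i - 2 and i - 1
        for (int x : nums) {
            int cur = max(prev2 + x, prev1); // rob this house or skip it
            prev2 = prev1;
            prev1 = cur;
        }
        return prev1;
    }
};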
Practice Problems: Longest Common Subsequence (LeetCode 1143)
class Solution {
public:
    int longestCommonSubsequence(string text1, string text2) {
        int len1 = text1.length(), len2 = text2.length();
        // dp[i][j] = LCS length of text1[0..i) and text2[0..j)
        vector<vector<int>> dp(len1 + 1, vector<int>(len2 + 1, 0));
        for (int i = 1; i <= len1; i++) {
            for (int j = 1; j <= len2; j++) {
                if (text1[i - 1] == text2[j - 1]) {
                    dp[i][j] = dp[i - 1][j - 1] + 1; // extend the common subsequence
                } else {
                    dp[i][j] = max(dp[i - 1][j], dp[i][j - 1]); // drop one character
                }
            }
        }
        return dp[len1][len2];
    }
};
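Only the previous row of the table is ever read, so memory can be cut from O(len1 * len2) to O(len2). A rough sketch of that optimization (untested):

class Solution {
public:
    int longestCommonSubsequence(string text1, string text2) {
        int len1 = text1.length(), len2 = text2.length();
        vector<int> prev(len2 + 1, 0), cur(len2 + 1, 0); // rows i - 1 and i
        for (int i = 1; i <= len1; i++) {
            for (int j = 1; j <= len2; j++) {
                if (text1[i - 1] == text2[j - 1])
                    cur[j] = prev[j - 1] + 1;
                else
                    cur[j] = max(prev[j], cur[j - 1]);
            }
            swap(prev, cur); // row i becomes the "previous" row
        }
        return prev[len2];
    }
};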
Practice Problems: Coin Change (LeetCode 322)
class Solution {
public:
    int coinChange(vector<int>& coins, int amount) {
        // Any reachable amount needs at most `amount` coins (coin values >= 1),
        // so amount + 1 works as "infinity".
        const int INF = amount + 1;
        vector<int> dp(amount + 1, INF);
        dp[0] = 0;
        for (int i = 0; i < (int)coins.size(); i++) {
            for (int j = coins[i]; j <= amount; j++) {
                // take one more coin of this denomination
                dp[j] = min(dp[j], dp[j - coins[i]] + 1);
            }
        }
        return dp[amount] > amount ? -1 : dp[amount];
    }
};
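For example, coins {1, 2, 5} with amount 11 gives 3 (5 + 5 + 1). A minimal driver to check the solution locally, assuming it is compiled together with the class above:

#include <iostream>
#include <vector>
using namespace std;
// ... class Solution from above ...
int main() {
    vector<int> coins = {1, 2, 5};
    Solution s;
    cout << s.coinChange(coins, 11) << endl; // expected output: 3
}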
Practice Problems: Frog 1 (AtCoder Educational DP Contest A)
#include <iostream>
#include <algorithm>
#include <vector>
#include <math.h>
using namespace std;
#define pb push_back
#define ll long long
const int MAXN = 1e5 + 4;
int n;
int arr[MAXN]; // stone heights
int main() {
    while (cin >> n) {
        for (int i = 0; i < n; i++) {
            cin >> arr[i];
        }
        // dp[i] = minimum total cost to reach stone i (the problem guarantees n >= 2)
        int dp[n];
        dp[0] = 0;
        dp[1] = abs(arr[1] - arr[0]);
        for (int i = 2; i < n; i++) {
            // jump from stone i - 1 or stone i - 2, whichever is cheaper
            dp[i] = min(abs(arr[i] - arr[i - 1]) + dp[i - 1],
                        abs(arr[i] - arr[i - 2]) + dp[i - 2]);
        }
        cout << dp[n - 1] << endl;
    }
}
Practice Problems: Frog 2 (AtCoder Educational DP Contest B)
#include <iostream>
#include <algorithm>
#include <vector>
#include <math.h>
using namespace std;
#define pb push_back
#define ll long long
const int MAXN = 1e5 + 1;
int n, k;
int arr[MAXN]; // stone heights, 1-indexed
int main() {
    while (cin >> n >> k) {
        // dp[i] = minimum total cost to reach stone i
        // (size n + 1, since stones are 1-indexed up to n; dp[n] was out of bounds before)
        int dp[n + 1];
        for (int i = 1; i <= n; i++) {
            cin >> arr[i];
            dp[i] = 1e9 + 7;
        }
        dp[1] = 0;
        for (int i = 2; i <= n; i++) {
            // try jumping from each of the k previous stones
            for (int j = 1; j <= k; j++) {
                if (i - j >= 1) {
                    dp[i] = min(dp[i - j] + abs(arr[i - j] - arr[i]), dp[i]);
                }
            }
        }
        cout << dp[n] << endl;
    }
}
Practice Problems: Vacation (AtCoder Educational DP Contest C)
#include <iostream>
#include <algorithm>
#include <vector>
#include <math.h>
using namespace std;
#define pb push_back
#define ll long long
const int MAXN = 1e5 + 1;
int n;
struct happiness {
    int a, b, c; // happiness gained from each of the three activities on a given day
};
int maxn(int a, int b, int c) {
    return max(max(a, b), c);
}
int main() {
    while (cin >> n) {
        happiness happ[MAXN];
        // dpa[i] = best total happiness up to day i if activity a is done on day i
        // (dpb and dpc likewise)
        int dpa[n + 1], dpb[n + 1], dpc[n + 1];
        for (int i = 1; i <= n; i++) {
            cin >> happ[i].a >> happ[i].b >> happ[i].c;
            dpa[i] = dpb[i] = dpc[i] = 0;
        }
        dpa[1] = happ[1].a;
        dpb[1] = happ[1].b;
        dpc[1] = happ[1].c;
        for (int i = 2; i <= n; i++) {
            // the same activity may not be done on two consecutive days
            dpa[i] = max(dpb[i - 1], dpc[i - 1]) + happ[i].a;
            dpb[i] = max(dpa[i - 1], dpc[i - 1]) + happ[i].b;
            dpc[i] = max(dpa[i - 1], dpb[i - 1]) + happ[i].c;
        }
        cout << maxn(dpa[n], dpb[n], dpc[n]) << endl;
    }
}
Practice Problems: Knapsack 1 (AtCoder Educational DP Contest D)
#include <iostream>
#include <algorithm>
#include <vector>
#include <math.h>
using namespace std;
#define pb push_back
#define ll long long
const int MAXN = 1e5 + 1;
int N, W;
int w[101], v[101]; // weights and values, N <= 100
int main() {
    while (cin >> N >> W) {
        // dp[j] = best total value achievable with capacity j
        ll dp[W + 1];
        fill(dp, dp + W + 1, 0);
        for (int i = 0; i < N; i++) {
            cin >> w[i] >> v[i];
        }
        for (int i = 0; i < N; i++) {
            // iterate j downward so each item is used at most once
            for (int j = W; j >= w[i]; j--) {
                dp[j] = max(dp[j - w[i]] + v[i], dp[j]);
            }
        }
        cout << dp[W] << endl;
    }
}
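The downward loop over j is what enforces the 0/1 constraint: dp[j - w[i]] still holds the value from before item i was considered, so each item is counted at most once. Iterating upward instead yields the unbounded knapsack, the same pattern the coin-change solution above relies on. A sketch of that forward variant's inner loops for contrast (an illustrative fragment, not part of the original problem):

        for (int i = 0; i < N; i++) {
            // forward iteration: dp[j - w[i]] may already include copies of item i,
            // so each item can be taken any number of times
            for (int j = w[i]; j <= W; j++) {
                dp[j] = max(dp[j - w[i]] + v[i], dp[j]);
            }
        }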
Practice Problems: Knapsack 2 (AtCoder Educational DP Contest E)
#include <iostream>
#include <algorithm>
#include <vector>
#include <math.h>
#include <string.h>
#include <climits>
using namespace std;
#define pb push_back
#define ll long long
ll N, W;
ll w[101], v[101];
int main() {
    while (cin >> N >> W) {
        // W is too large to index an array, but total value is small,
        // so the DP runs over value instead of weight.
        ll total = 0;
        for (ll i = 0; i < N; i++) {
            cin >> w[i] >> v[i];
            total += v[i];
        }
        // dp[j] = minimum total weight needed to collect exactly value j
        ll dp[total + 1];
        fill(dp, dp + total + 1, INT_MAX);
        dp[0] = 0;
        for (ll i = 0; i < N; i++) {
            for (ll j = total; j >= v[i]; j--) {
                dp[j] = min(dp[j - v[i]] + w[i], dp[j]);
            }
        }
        // answer: the largest value whose minimum weight fits in W
        ll ans = -1;
        for (ll i = 0; i <= total; i++) {
            if (dp[i] <= W) {
                ans = max(ans, i);
            }
        }
        cout << ans << endl;
    }
}
Practice Problems: LCS (AtCoder Educational DP Contest F)
#include <iostream>
#include <algorithm>
#include <vector>
#include <math.h>
#include <string.h>
#include <climits>
using namespace std;
#define pb push_back
#define ll long long
string a, b, ans;
int len[3001][3001]; // len[i][j] = LCS length of a[0..i) and b[0..j)
// Walk the table backwards, prepending matched characters to ans.
void sub(int lena, int lenb) {
    if (lena == 0 || lenb == 0) return;
    if (a[lena - 1] == b[lenb - 1]) {
        ans = a[lena - 1] + ans; // this character is part of the LCS
        sub(lena - 1, lenb - 1);
    } else {
        // move toward whichever neighbour holds the longer LCS
        if (len[lena - 1][lenb] > len[lena][lenb - 1]) {
            sub(lena - 1, lenb);
        } else {
            sub(lena, lenb - 1);
        }
    }
}
int main() {
    while (cin >> a >> b) {
        int lena = a.length(), lenb = b.length();
        ans = "";
        memset(len, 0, sizeof(len));
        for (int i = 1; i <= lena; i++) {
            for (int j = 1; j <= lenb; j++) {
                if (a[i - 1] == b[j - 1]) {
                    len[i][j] = len[i - 1][j - 1] + 1;
                } else {
                    len[i][j] = max(len[i - 1][j], len[i][j - 1]);
                }
            }
        }
        sub(lena, lenb);
        cout << ans << endl;
    }
}
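The recursive walk in sub can go lena + lenb levels deep (up to 6000 here), which is usually fine but easy to avoid. A sketch of an equivalent iterative reconstruction over the same len table (untested; sub_iter is a hypothetical replacement for sub):

string sub_iter(int lena, int lenb) {
    string res;
    int i = lena, j = lenb;
    while (i > 0 && j > 0) {
        if (a[i - 1] == b[j - 1]) {
            res += a[i - 1]; // part of the LCS, collected in reverse order
            i--; j--;
        } else if (len[i - 1][j] > len[i][j - 1]) {
            i--; // the longer LCS lies in the row above
        } else {
            j--; // the longer LCS lies in the column to the left
        }
    }
    reverse(res.begin(), res.end());
    return res;
}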