forked from andy-yangz/nlp_multi_task_learning_pytorch
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathrun.sh
More file actions
157 lines (144 loc) · 3.95 KB
/
run.sh
File metadata and controls
157 lines (144 loc) · 3.95 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
#!/usr/bin/env bash
# Train POS/Chunk taggers with main.py: two single-task baselines followed
# by two joint configurations. Requires ./data and a CUDA-capable GPU.
#
# Fail fast: abort on any failed run, unset variable, or broken pipeline,
# instead of silently continuing to the next experiment.
set -euo pipefail

# All checkpoints go under ./result; create it so --save cannot fail.
mkdir -p ./result

echo "Running Model"
echo "POS"
# POS-only baseline: all recurrent capacity on the POS stack (2 layers).
python main.py --data './data' \
  --emsize 256 \
  --npos_layers 2 \
  --nchunk_layers 0 \
  --nhid 128 \
  --batch_size 128 \
  --seq_len 10 \
  --cuda \
  --train_mode 'POS' \
  --epochs 300 \
  --log_interval 20 \
  --save './result/pos_model'
echo "Chunk"
# Chunk-only baseline: same hyperparameters as the POS run, but all
# recurrent capacity on the chunking stack (2 layers, 0 POS layers).
chunk_args=(
  --data './data'
  --emsize 256
  --npos_layers 0
  --nchunk_layers 2
  --nhid 128
  --batch_size 128
  --seq_len 10
  --cuda
  --train_mode 'Chunk'
  --epochs 300
  --log_interval 20
  --save './result/chunk_model'
)
python main.py "${chunk_args[@]}"
echo "Joint Training on the same level"
# Joint model with both task stacks at equal depth: 2 POS layers and
# 2 chunk layers trained together.
joint_same_args=(
  --data './data'
  --emsize 256
  --npos_layers 2
  --nchunk_layers 2
  --nhid 128
  --batch_size 128
  --seq_len 10
  --cuda
  --train_mode 'Joint'
  --epochs 300
  --log_interval 20
  --save './result/joint_same'
)
python main.py "${joint_same_args[@]}"
echo "Joint Training on the different level"
# Joint model with the tasks at different depths: a shallower POS stack
# (1 layer) under a deeper chunking stack (2 layers).
joint_diff_args=(
  --data './data'
  --emsize 256
  --npos_layers 1
  --nchunk_layers 2
  --nhid 128
  --batch_size 128
  --seq_len 10
  --cuda
  --train_mode 'Joint'
  --epochs 300
  --log_interval 20
  --save './result/joint_diff'
)
python main.py "${joint_diff_args[@]}"
# echo "Embedding size"
# for emsize in 128 256 512
# do
# echo "Embedding size $emsize"
# python main.py --data './data' \
# --emsize $emsize \
# --nlayers 1 \
# --nhid 128 \
# --batch_size 128 \
# --seq_len 15 \
# --cuda \
# --epochs 300 \
# --log_interval 20 \
# --save './result/pos_model'
# done
# echo "Number of Layers"
# for nlayers in 2 3
# do
# echo "Number of layers $nlayers"
# python main.py --data './data' \
# --emsize 128 \
# --nlayers $nlayers \
# --nhid 128 \
# --batch_size 128 \
# --seq_len 15 \
# --cuda \
# --epochs 300 \
# --log_interval 20 \
# --save './result/pos_model'
# done
# echo "Number of hidden units"
# for nhid in 256 512
# do
# echo "Number of hidden units $nhid"
# python main.py --data './data' \
# --emsize 128 \
# --nlayers 1 \
# --nhid $nhid \
# --batch_size 128 \
# --seq_len 15 \
# --train_mode 'POS' \
# --cuda \
# --epochs 300 \
# --log_interval 10 \
# --save './result/pos_model'
# done
# echo "Sequence Length"
# for seq_len in 10 20
# do
# python main.py --data './data' \
# --emsize 128 \
# --nlayers 1 \
# --nhid 128 \
# --batch_size 128 \
# --seq_len $seq_len \
# --train_mode 'POS' \
# --cuda \
# --epochs 300 \
# --log_interval 10 \
# --save './result/pos_model'
# done
# for dropout in 0.4 0.6
# do
# python main.py --data './data' \
# --emsize 128 \
# --nlayers 1 \
# --nhid 128 \
# --batch_size 128 \
# --seq_len 15 \
# --dropout $dropout \
# --train_mode 'POS' \
# --cuda \
# --epochs 300 \
# --log_interval 10 \
# --save './result/pos_model'
# done
# for rnn_type in 'GRU' 'Elman'
# do
# python main.py --data './data' \
# --emsize 128 \
# --nlayers 1 \
# --nhid 128 \
# --batch_size 128 \
# --seq_len 15 \
# --rnn_type $rnn_type \
# --train_mode 'POS' \
# --cuda \
# --epochs 300 \
# --log_interval 10 \
# --save './result/pos_model'
# done