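"""Command line training script for a binary classifier on face crops.

main() below only parses the options; the rest of the training code follows
parser.parse_args() and is not shown in this excerpt.
"""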
import argparse

from isplutils import split  # assumed project-local module providing available_datasets


def main():
    # Command line parsing
    parser = argparse.ArgumentParser()

    # Model and dataset selection
    parser.add_argument('--net', type=str, help='Net model class', required=True)
    parser.add_argument('--traindb', type=str, help='Training datasets', nargs='+',
                        choices=split.available_datasets, required=True)
    parser.add_argument('--valdb', type=str, help='Validation datasets', nargs='+',
                        choices=split.available_datasets, required=True)
    parser.add_argument('--dfdc_faces_df_path', type=str, action='store',
                        help='Path to the Pandas DataFrame obtained from extract_faces.py on the DFDC dataset. '
                             'Required for training/validating on the DFDC dataset.')
    parser.add_argument('--dfdc_faces_dir', type=str, action='store',
                        help='Path to the directory containing the faces extracted from the DFDC dataset. '
                             'Required for training/validating on the DFDC dataset.')
    parser.add_argument('--ffpp_faces_df_path', type=str, action='store',
                        help='Path to the Pandas DataFrame obtained from extract_faces.py on the FF++ dataset. '
                             'Required for training/validating on the FF++ dataset.')
    parser.add_argument('--ffpp_faces_dir', type=str, action='store',
                        help='Path to the directory containing the faces extracted from the FF++ dataset. '
                             'Required for training/validating on the FF++ dataset.')
    parser.add_argument('--face', type=str, help='Face crop or scale', required=True,
                        choices=['scale', 'tight'])
    parser.add_argument('--size', type=int, help='Train patch size', required=True)

    # Training hyperparameters
    parser.add_argument('--batch', type=int, help='Batch size to fit in GPU memory', default=32)
    parser.add_argument('--lr', type=float, default=1e-5, help='Learning rate')
    parser.add_argument('--valint', type=int, help='Validation interval (iterations)', default=500)
    parser.add_argument('--patience', type=int, help='Patience before dropping the LR [validation intervals]',
                        default=10)
    parser.add_argument('--maxiter', type=int, help='Maximum number of iterations', default=20000)
    parser.add_argument('--init', type=str, help='Weight initialization file')
    parser.add_argument('--scratch', action='store_true', help='Train from scratch')

    # Per-epoch sample limits
    parser.add_argument('--trainsamples', type=int, help='Limit the number of train samples per epoch', default=-1)
    parser.add_argument('--valsamples', type=int, help='Limit the number of validation samples per epoch',
                        default=6000)

    # Runtime settings
    parser.add_argument('--logint', type=int, help='Training log interval (iterations)', default=100)
    parser.add_argument('--workers', type=int, help='Number of workers for the data loaders', default=6)
    parser.add_argument('--device', type=int, help='GPU device id', default=0)
    parser.add_argument('--seed', type=int, help='Random seed', default=0)

    parser.add_argument('--debug', action='store_true', help='Activate debug mode')
    parser.add_argument('--suffix', type=str, help='Suffix to append to the default run tag')

    # Logging and output directories
    parser.add_argument('--attention', action='store_true',
                        help='Enable TensorBoard logging of attention masks')
    parser.add_argument('--log_dir', type=str, help='Directory for saving the training logs',
                        default='runs/binclass/')
    parser.add_argument('--models_dir', type=str, help='Directory for saving the model weights',
                        default='weights/binclass/')

    args = parser.parse_args()
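# Example invocation (a sketch, not taken from the original source: the script
# name, model class name, dataset tags, and paths below are hypothetical
# placeholders; valid values come from the project's model classes and from
# split.available_datasets):
#
#   python train_binclass.py \
#       --net EfficientNetB4 \
#       --traindb ff-c23-720-140-140 \
#       --valdb ff-c23-720-140-140 \
#       --ffpp_faces_df_path /path/to/ffpp_faces_df.pkl \
#       --ffpp_faces_dir /path/to/ffpp_faces \
#       --face scale --size 224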