
[TensorFlow 1.0 Study Notes] (14) Implementing Classic CNN Architectures, Part 4: ResNet

2019-11-06 07:41:56

The notes on VGGNet and Inception that belong in between will be filled in later; for now, here is the ResNet code, with further additions to follow.
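As a quick orientation before the full script: the core operation every bottleneck unit below performs is the additive skip connection output = shortcut + residual. The following framework-free sketch only illustrates that idea; the toy shapes and the tanh stand-in for the convolution branch are assumptions for illustration, not part of the original code.

import numpy as np

# Toy sketch of residual learning: the unit learns a residual F(x) and adds it to a
# shortcut of the input, so the layer outputs x + F(x) instead of F(x) directly.
def toy_residual_unit(x, weight):
    residual = np.tanh(x @ weight)   # stand-in for the conv1 -> conv2 -> conv3 branch
    shortcut = x                     # identity shortcut (input/output depths match here)
    return shortcut + residual       # the additive skip connection of ResNet

x = np.random.randn(4, 8)
w = np.random.randn(8, 8)
print(toy_residual_unit(x, w).shape)  # (4, 8)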

# coding=utf-8
import collections
import math
import time
from datetime import datetime

import tensorflow as tf

# Some utilities in contrib.slim greatly reduce the amount of code needed to build the network.
slim = tf.contrib.slim


# Use collections.namedtuple to define the basic Block module group of ResNet. The Block
# class only holds the data structure (scope, unit function, args); it has no methods.
class Block(collections.namedtuple('Block', ['scope', 'unit_fn', 'args'])):
    'A named tuple describing a ResNet block.'


def subsample(inputs, factor, scope=None):
    """Downsample the input. If factor is 1, return the input unchanged; otherwise use
    1x1 max pooling with factor as the stride to perform the downsampling."""
    if factor == 1:
        return inputs
    else:
        return slim.max_pool2d(inputs, [1, 1], stride=factor, scope=scope)


# Create a convolution layer that behaves like 'SAME' padding even when stride > 1,
# by padding the input explicitly and then convolving with 'VALID' padding.
def conv2d_same(inputs, num_outputs, kernel_size, stride, scope=None):
    if stride == 1:
        return slim.conv2d(inputs, num_outputs, kernel_size, stride=1,
                           padding='SAME', scope=scope)
    else:
        pad_total = kernel_size - 1
        pad_beg = pad_total // 2
        pad_end = pad_total - pad_beg
        inputs = tf.pad(inputs, [[0, 0], [pad_beg, pad_end],
                                 [pad_beg, pad_end], [0, 0]])
        return slim.conv2d(inputs, num_outputs, kernel_size, stride=stride,
                           padding='VALID', scope=scope)


# Stack the Blocks. net is the input, blocks is a list of the Block class defined above,
# and outputs_collections is the collection used to gather the end_points.
@slim.add_arg_scope
def stack_blocks_dense(net, blocks, outputs_collections=None):
    for block in blocks:
        with tf.variable_scope(block.scope, 'block', [net]) as sc:
            for i, unit in enumerate(block.args):
                with tf.variable_scope('unit_%d' % (i + 1), values=[net]):
                    unit_depth, unit_depth_bottleneck, unit_stride = unit
                    net = block.unit_fn(net, depth=unit_depth,
                                        depth_bottleneck=unit_depth_bottleneck,
                                        stride=unit_stride)
            net = slim.utils.collect_named_outputs(outputs_collections, sc.name, net)
    return net


# Create the universal arg_scope for ResNet; arg_scope sets default parameters for certain ops.
def resnet_arg_scope(is_training=True, weight_decay=0.0001,
                     batch_norm_decay=0.997, batch_norm_epsilon=1e-5,
                     batch_norm_scale=True):
    batch_norm_params = {
        'is_training': is_training,
        'decay': batch_norm_decay,
        'epsilon': batch_norm_epsilon,
        'scale': batch_norm_scale,
        'updates_collections': tf.GraphKeys.UPDATE_OPS,
    }
    with slim.arg_scope([slim.conv2d],
                        weights_regularizer=slim.l2_regularizer(weight_decay),
                        weights_initializer=slim.variance_scaling_initializer(),
                        activation_fn=tf.nn.relu,
                        normalizer_fn=slim.batch_norm,
                        normalizer_params=batch_norm_params):
        with slim.arg_scope([slim.batch_norm], **batch_norm_params):
            with slim.arg_scope([slim.max_pool2d], padding='SAME') as arg_sc:
                return arg_sc


# The core bottleneck residual learning unit, a variant of the full preactivation residual
# unit from the ResNet V2 paper. Compared with the units of ResNet V1:
# 1) Batch Normalization is applied before every layer;
# 2) the input is preactivated, rather than applying the activation after the convolution.
@slim.add_arg_scope
def bottleneck(inputs, depth, depth_bottleneck, stride,
               outputs_collections=None, scope=None):
    with tf.variable_scope(scope, 'bottleneck_v2', [inputs]) as sc:
        depth_in = slim.utils.last_dimension(inputs.get_shape(), min_rank=4)
        preact = slim.batch_norm(inputs, activation_fn=tf.nn.relu, scope='preact')
        if depth == depth_in:
            shortcut = subsample(inputs, stride, 'shortcut')
        else:
            shortcut = slim.conv2d(preact, depth, [1, 1], stride=stride,
                                   normalizer_fn=None, activation_fn=None,
                                   scope='shortcut')
        residual = slim.conv2d(preact, depth_bottleneck, [1, 1], stride=1,
                               scope='conv1')
        residual = conv2d_same(residual, depth_bottleneck, 3, stride,
                               scope='conv2')
        residual = slim.conv2d(residual, depth, [1, 1], stride=1,
                               normalizer_fn=None, activation_fn=None,
                               scope='conv3')
        output = shortcut + residual
        return slim.utils.collect_named_outputs(outputs_collections, sc.name, output)


# The main function that generates ResNet V2. Given the predefined residual module groups
# (blocks), it builds the complete corresponding ResNet.
def resnet_v2(inputs, blocks, num_classes=None, global_pool=True,
              include_root_block=True, reuse=None, scope=None):
    with tf.variable_scope(scope, 'resnet_v2', [inputs], reuse=reuse) as sc:
        end_point_collection = sc.original_name_scope + '_end_points'
        with slim.arg_scope([slim.conv2d, bottleneck, stack_blocks_dense],
                            outputs_collections=end_point_collection):
            net = inputs
            if include_root_block:
                with slim.arg_scope([slim.conv2d], activation_fn=None,
                                    normalizer_fn=None):
                    net = conv2d_same(net, 64, 7, stride=2, scope='conv1')
                net = slim.max_pool2d(net, [3, 3], stride=2, scope='pool1')
            net = stack_blocks_dense(net, blocks)
            net = slim.batch_norm(net, activation_fn=tf.nn.relu, scope='postnorm')
            if global_pool:
                net = tf.reduce_mean(net, [1, 2], name='pool5', keep_dims=True)
            if num_classes is not None:
                net = slim.conv2d(net, num_classes, [1, 1], activation_fn=None,
                                  normalizer_fn=None, scope='logits')
            end_points = slim.utils.convert_collection_to_dict(end_point_collection)
            if num_classes is not None:
                end_points['predictions'] = slim.softmax(net, scope='predictions')
            return net, end_points


# A few recommended ResNet configurations at different depths: 50, 101, 152 and 200 layers.
def resnet_v2_50(inputs, num_classes=None, global_pool=True,
                 reuse=None, scope='resnet_v2_50'):
    blocks = [
        Block('block1', bottleneck, [(256, 64, 1)] * 2 + [(256, 64, 2)]),
        Block('block2', bottleneck, [(512, 128, 1)] * 3 + [(512, 128, 2)]),
        Block('block3', bottleneck, [(1024, 256, 1)] * 5 + [(1024, 256, 2)]),
        Block('block4', bottleneck, [(2048, 512, 1)] * 3)]
    return resnet_v2(inputs, blocks, num_classes, global_pool,
                     include_root_block=True, reuse=reuse, scope=scope)


# Compared with the 50-layer ResNet, the main change in the 101-layer version is that the
# number of units in the third Block grows from 6 to 23, nearly four times as many.
def resnet_v2_101(inputs, num_classes=None, global_pool=True,
                  reuse=None, scope='resnet_v2_101'):
    blocks = [
        Block('block1', bottleneck, [(256, 64, 1)] * 2 + [(256, 64, 2)]),
        Block('block2', bottleneck, [(512, 128, 1)] * 3 + [(512, 128, 2)]),
        Block('block3', bottleneck, [(1024, 256, 1)] * 22 + [(1024, 256, 2)]),
        Block('block4', bottleneck, [(2048, 512, 1)] * 3)]
    return resnet_v2(inputs, blocks, num_classes, global_pool,
                     include_root_block=True, reuse=reuse, scope=scope)


# The 152-layer ResNet raises the number of units in the second Block to 8 and in the third
# Block to 36; the third Block is still where most of the additional units go.
def resnet_v2_152(inputs, num_classes=None, global_pool=True,
                  reuse=None, scope='resnet_v2_152'):
    blocks = [
        Block('block1', bottleneck, [(256, 64, 1)] * 2 + [(256, 64, 2)]),
        Block('block2', bottleneck, [(512, 128, 1)] * 7 + [(512, 128, 2)]),
        Block('block3', bottleneck, [(1024, 256, 1)] * 35 + [(1024, 256, 2)]),
        Block('block4', bottleneck, [(2048, 512, 1)] * 3)]
    return resnet_v2(inputs, blocks, num_classes, global_pool,
                     include_root_block=True, reuse=reuse, scope=scope)


# Finally, compared with the 152-layer ResNet, the 200-layer version does not increase the
# third Block's unit count any further; instead it raises the second Block's unit count to 24.
def resnet_v2_200(inputs, num_classes=None, global_pool=True,
                  reuse=None, scope='resnet_v2_200'):
    blocks = [
        Block('block1', bottleneck, [(256, 64, 1)] * 2 + [(256, 64, 2)]),
        Block('block2', bottleneck, [(512, 128, 1)] * 23 + [(512, 128, 2)]),
        Block('block3', bottleneck, [(1024, 256, 1)] * 35 + [(1024, 256, 2)]),
        Block('block4', bottleneck, [(2048, 512, 1)] * 3)]
    return resnet_v2(inputs, blocks, num_classes, global_pool,
                     include_root_block=True, reuse=reuse, scope=scope)


# Build a 152-layer ResNet on a batch of random images and benchmark the forward pass.
batch_size = 32
height, width = 224, 224
inputs = tf.random_uniform((batch_size, height, width, 3))
with slim.arg_scope(resnet_arg_scope(is_training=False)):
    net, end_points = resnet_v2_152(inputs, 1000)

init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)
num_batches = 100


def time_tensorflow_run(session, target, info_string):
    """Run num_batches + num_step_burn_in iterations of session.run(target), timing each one
    with time.time(). After the warm-up iterations, print the duration every 10 steps and
    accumulate total_duration and total_duration_squared so that the mean and standard
    deviation of the per-batch time can be computed afterwards."""
    num_step_burn_in = 10
    total_duration = 0.0
    total_duration_squared = 0.0
    for i in range(num_batches + num_step_burn_in):
        start_time = time.time()
        _ = session.run(target)
        duration = time.time() - start_time
        if i >= num_step_burn_in:
            if not i % 10:
                print('%s: step %d, duration = %.3f' %
                      (datetime.now(), i - num_step_burn_in, duration))
            total_duration += duration
            total_duration_squared += duration * duration
    # After the loop, compute the mean (mn) and standard deviation (sd) of the per-batch time
    # and print the result. This completes the evaluation function time_tensorflow_run.
    mn = total_duration / num_batches
    vr = total_duration_squared / num_batches - mn * mn
    sd = math.sqrt(vr)
    print('%s: %s across %d steps, %.3f +/- %.3f sec / batch' %
          (datetime.now(), info_string, num_batches, mn, sd))


# Main part: evaluate the forward pass of ResNet-152.
time_tensorflow_run(sess, net, "Forward")
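For reference, each tuple in a Block's args list is unpacked by stack_blocks_dense as (depth, depth_bottleneck, stride), so the length of each args list is the number of residual units in that Block. The small standalone check below of the 50-layer configuration is only a sketch; the layer-count arithmetic is my own illustration, not part of the original script.

import collections

# Standalone sketch: verify that the resnet_v2_50 block configuration really yields 50 layers.
Block = collections.namedtuple('Block', ['scope', 'unit_fn', 'args'])

blocks_50 = [
    Block('block1', None, [(256, 64, 1)] * 2 + [(256, 64, 2)]),
    Block('block2', None, [(512, 128, 1)] * 3 + [(512, 128, 2)]),
    Block('block3', None, [(1024, 256, 1)] * 5 + [(1024, 256, 2)]),
    Block('block4', None, [(2048, 512, 1)] * 3),
]
units_per_block = [len(b.args) for b in blocks_50]
print(units_per_block)                # [3, 4, 6, 3] -- the unit counts of ResNet-50
# Three convolutions per bottleneck unit, plus the root 7x7 conv and the final logits layer.
print(sum(units_per_block) * 3 + 2)   # 50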

