Bootstrap/bootstrap-3.3.5-dist/css/bootstrap-theme

/*! * Bootstrap v3.3.5 (http://getbootstrap.com) * Copyright 2011-2015 Twitter, Inc. * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE) */ .btn-default, .btn-primary, .btn-success, .btn-info, .btn-warning, .btn-danger { text-shadow: 0 -1px 0 rgba(0, 0, 0, .2); -webkit-box-shadow: inset 0 1px 0 rgba(255, 255, 255, .15), 0 1px 1px rgba(0, 0, 0, .075); box-shadow: inset 0 1px 0 rgba(255, 255, 255, .15), 0 1px 1px rgba(0, 0, 0, .075); } .btn-default:active, .btn-primary:active, .btn-success:active, .btn-info:active, .btn-warning:active, .btn-danger:active, .btn-default.active, .btn-primary.active, .btn-success.active, .btn-info.active, .btn-warning.active, .btn-danger.active { -webkit-box-shadow: inset 0 3px 5px rgba(0, 0, 0, .125); box-shadow: inset 0 3px 5px rgba(0, 0, 0, .125); } .btn-default.disabled, .btn-primary.disabled, .btn-success.disabled, .btn-info.disabled, .btn-warning.disabled, .btn-danger.disabled, .btn-default[disabled], .btn-primary[disabled], .btn-success[disabled], .btn-info[disabled], .btn-warning[disabled], .btn-danger[disabled], fieldset[disabled] .btn-default, fieldset[disabled] .btn-primary, fieldset[disabled] .btn-success, fieldset[disabled] .btn-info, fieldset[disabled] .btn-warning, fieldset[disabled] .btn-danger { -webkit-box-shadow: none; box-shadow: none; } .btn-default .badge, .btn-primary .badge, .btn-success .badge, .btn-info .badge, .btn-warning .badge, .btn-danger .badge { text-shadow: none; } .btn:active, .btn.active { background-image: none; } .btn-default { text-shadow: 0 1px 0 #fff; background-image: -webkit-linear-gradient(top, #fff 0%, #e0e0e0 100%); background-image: -o-linear-gradient(top, #fff 0%, #e0e0e0 100%); background-image: -webkit-gradient(linear, left top, left bottom, from(#fff), to(#e0e0e0)); background-image: linear-gradient(to bottom, #fff 0%, #e0e0e0 100%); filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffffffff', endColorstr='#ffe0e0e0', GradientType=0); filter: 
progid:DXImageTransform.Microsoft.gradient(enabled = false); background-repeat: repeat-x; border-color: #dbdbdb; border-color: #ccc; } .btn-default:hover, .btn-default:focus { background-color: #e0e0e0; background-position: 0 -15px; } .btn-default:active, .btn-default.active { background-color: #e0e0e0; border-color: #dbdbdb; } .btn-default.disabled, .btn-default[disabled], fieldset[disabled] .btn-default, .btn-default.disabled:hover, .btn-default[disabled]:hover, fieldset[disabled] .btn-default:hover, .btn-default.disabled:focus, .btn-default[disabled]:focus, fieldset[disabled] .btn-default:focus, .btn-default.disabled.focus, .btn-default[disabled].focus, fieldset[disabled] .btn-default.focus, .btn-default.disabled:active, .btn-default[disabled]:active, fieldset[disabled] .btn-default:active, .btn-default.disabled.active, .btn-default[disabled].active, fieldset[disabled] .btn-default.active { background-color: #e0e0e0; background-image: none; } .btn-primary { background-image: -webkit-linear-gradient(top, #337ab7 0%, #265a88 100%); background-image: -o-linear-gradient(top, #337ab7 0%, #265a88 100%); background-image: -webkit-gradient(linear, left top, left bottom, from(#337ab7), to(#265a88)); background-image: linear-gradient(to bottom, #337ab7 0%, #265a88 100%); filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff337ab7', endColorstr='#ff265a88', GradientType=0); filter: progid:DXImageTransform.Microsoft.gradient(enabled = false); background-repeat: repeat-x; border-color: #245580; } .btn-primary:hover, .btn-primary:focus { background-color: #265a88; background-position: 0 -15px; } .btn-primary:active, .btn-primary.active { background-color: #265a88; border-color: #245580; } .btn-primary.disabled, .btn-primary[disabled], fieldset[disabled] .btn-primary, .btn-primary.disabled:hover, .btn-primary[disabled]:hover, fieldset[disabled] .btn-primary:hover, .btn-primary.disabled:focus, .btn-primary[disabled]:focus, fieldset[disabled] .btn-primary:focus, 
.btn-primary.disabled.focus, .btn-primary[disabled].focus, fieldset[disabled] .btn-primary.focus, .btn-primary.disabled:active, .btn-primary[disabled]:active, fieldset[disabled] .btn-primary:active, .btn-primary.disabled.active, .btn-primary[disabled].active, fieldset[disabled] .btn-primary.active { background-color: #265a88; background-image: none; } .btn-success { background-image: -webkit-linear-gradient(top, #5cb85c 0%, #419641 100%); background-image: -o-linear-gradient(top, #5cb85c 0%, #419641 100%); background-image: -webkit-gradient(linear, left top, left bottom, from(#5cb85c), to(#419641)); background-image: linear-gradient(to bottom, #5cb85c 0%, #419641 100%); filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff5cb85c', endColorstr='#ff419641', GradientType=0); filter: progid:DXImageTransform.Microsoft.gradient(enabled = false); background-repeat: repeat-x; border-color: #3e8f3e; } .btn-success:hover, .btn-success:focus { background-color: #419641; background-position: 0 -15px; } .btn-success:active, .btn-success.active { background-color: #419641; border-color: #3e8f3e; } .btn-success.disabled, .btn-success[disabled], fieldset[disabled] .btn-success, .btn-success.disabled:hover, .btn-success[disabled]:hover, fieldset[disabled] .btn-success:hover, .btn-success.disabled:focus, .btn-success[disabled]:focus, fieldset[disabled] .btn-success:focus, .btn-success.disabled.focus, .btn-success[disabled].focus, fieldset[disabled] .btn-success.focus, .btn-success.disabled:active, .btn-success[disabled]:active, fieldset[disabled] .btn-success:active, .btn-success.disabled.active, .btn-success[disabled].active, fieldset[disabled] .btn-success.active { background-color: #419641; background-image: none; } .btn-info { background-image: -webkit-linear-gradient(top, #5bc0de 0%, #2aabd2 100%); background-image: -o-linear-gradient(top, #5bc0de 0%, #2aabd2 100%); background-image: -webkit-gradient(linear, left top, left bottom, from(#5bc0de), to(#2aabd2)); 
background-image: linear-gradient(to bottom, #5bc0de 0%, #2aabd2 100%); filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff5bc0de', endColorstr='#ff2aabd2', GradientType=0); filter: progid:DXImageTransform.Microsoft.gradient(enabled = false); background-repeat: repeat-x; border-color: #28a4c9; } .btn-info:hover, .btn-info:focus { background-color: #2aabd2; background-position: 0 -15px; } .btn-info:active, .btn-info.active { background-color: #2aabd2; border-color: #28a4c9; } .btn-info.disabled, .btn-info[disabled], fieldset[disabled] .btn-info, .btn-info.disabled:hover, .btn-info[disabled]:hover, fieldset[disabled] .btn-info:hover, .btn-info.disabled:focus, .btn-info[disabled]:focus, fieldset[disabled] .btn-info:focus, .btn-info.disabled.focus, .btn-info[disabled].focus, fieldset[disabled] .btn-info.focus, .btn-info.disabled:active, .btn-info[disabled]:active, fieldset[disabled] .btn-info:active, .btn-info.disabled.active, .btn-info[disabled].active, fieldset[disabled] .btn-info.active { background-color: #2aabd2; background-image: none; } .btn-warning { background-image: -webkit-linear-gradient(top, #f0ad4e 0%, #eb9316 100%); background-image: -o-linear-gradient(top, #f0ad4e 0%, #eb9316 100%); background-image: -webkit-gradient(linear, left top, left bottom, from(#f0ad4e), to(#eb9316)); background-image: linear-gradient(to bottom, #f0ad4e 0%, #eb9316 100%); filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff0ad4e', endColorstr='#ffeb9316', GradientType=0); filter: progid:DXImageTransform.Microsoft.gradient(enabled = false); background-repeat: repeat-x; border-color: #e38d13; } .btn-warning:hover, .btn-warning:focus { background-color: #eb9316; background-position: 0 -15px; } .btn-warning:active, .btn-warning.active { background-color: #eb9316; border-color: #e38d13; } .btn-warning.disabled, .btn-warning[disabled], fieldset[disabled] .btn-warning, .btn-warning.disabled:hover, .btn-warning[disabled]:hover, fieldset[disabled] 
.btn-warning:hover, .btn-warning.disabled:focus, .btn-warning[disabled]:focus, fieldset[disabled] .btn-warning:focus, .btn-warning.disabled.focus, .btn-warning[disabled].focus, fieldset[disabled] .btn-warning.focus, .btn-warning.disabled:active, .btn-warning[disabled]:active, fieldset[disabled] .btn-warning:active, .btn-warning.disabled.active, .btn-warning[disabled].active, fieldset[disabled] .btn-warning.active { background-color: #eb9316; background-image: none; } .btn-danger { background-image: -webkit-linear-gradient(top, #d9534f 0%, #c12e2a 100%); background-image: -o-linear-gradient(top, #d9534f 0%, #c12e2a 100%); background-image: -webkit-gradient(linear, left top, left bottom, from(#d9534f), to(#c12e2a)); background-image: linear-gradient(to bottom, #d9534f 0%, #c12e2a 100%); filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffd9534f', endColorstr='#ffc12e2a', GradientType=0); filter: progid:DXImageTransform.Microsoft.gradient(enabled = false); background-repeat: repeat-x; border-color: #b92c28; } .btn-danger:hover, .btn-danger:focus { background-color: #c12e2a; background-position: 0 -15px; } .btn-danger:active, .btn-danger.active { background-color: #c12e2a; border-color: #b92c28; } .btn-danger.disabled, .btn-danger[disabled], fieldset[disabled] .btn-danger, .btn-danger.disabled:hover, .btn-danger[disabled]:hover, fieldset[disabled] .btn-danger:hover, .btn-danger.disabled:focus, .btn-danger[disabled]:focus, fieldset[disabled] .btn-danger:focus, .btn-danger.disabled.focus, .btn-danger[disabled].focus, fieldset[disabled] .btn-danger.focus, .btn-danger.disabled:active, .btn-danger[disabled]:active, fieldset[disabled] .btn-danger:active, .btn-danger.disabled.active, .btn-danger[disabled].active, fieldset[disabled] .btn-danger.active { background-color: #c12e2a; background-image: none; } .thumbnail, .img-thumbnail { -webkit-box-shadow: 0 1px 2px rgba(0, 0, 0, .075); box-shadow: 0 1px 2px rgba(0, 0, 0, .075); } .dropdown-menu > li > a:hover, 
.dropdown-menu > li > a:focus { background-color: #e8e8e8; background-image: -webkit-linear-gradient(top, #f5f5f5 0%, #e8e8e8 100%); background-image: -o-linear-gradient(top, #f5f5f5 0%, #e8e8e8 100%); background-image: -webkit-gradient(linear, left top, left bottom, from(#f5f5f5), to(#e8e8e8)); background-image: linear-gradient(to bottom, #f5f5f5 0%, #e8e8e8 100%); filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff5f5f5', endColorstr='#ffe8e8e8', GradientType=0); background-repeat: repeat-x; } .dropdown-menu > .active > a, .dropdown-menu > .active > a:hover, .dropdown-menu > .active > a:focus { background-color: #2e6da4; background-image: -webkit-linear-gradient(top, #337ab7 0%, #2e6da4 100%); background-image: -o-linear-gradient(top, #337ab7 0%, #2e6da4 100%); background-image: -webkit-gradient(linear, left top, left bottom, from(#337ab7), to(#2e6da4)); background-image: linear-gradient(to bottom, #337ab7 0%, #2e6da4 100%); filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff337ab7', endColorstr='#ff2e6da4', GradientType=0); background-repeat: repeat-x; } .navbar-default { background-image: -webkit-linear-gradient(top, #fff 0%, #f8f8f8 100%); background-image: -o-linear-gradient(top, #fff 0%, #f8f8f8 100%); background-image: -webkit-gradient(linear, left top, left bottom, from(#fff), to(#f8f8f8)); background-image: linear-gradient(to bottom, #fff 0%, #f8f8f8 100%); filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffffffff', endColorstr='#fff8f8f8', GradientType=0); filter: progid:DXImageTransform.Microsoft.gradient(enabled = false); background-repeat: repeat-x; border-radius: 4px; -webkit-box-shadow: inset 0 1px 0 rgba(255, 255, 255, .15), 0 1px 5px rgba(0, 0, 0, .075); box-shadow: inset 0 1px 0 rgba(255, 255, 255, .15), 0 1px 5px rgba(0, 0, 0, .075); } .navbar-default .navbar-nav > .open > a, .navbar-default .navbar-nav > .active > a { background-image: -webkit-linear-gradient(top, #dbdbdb 0%, #e2e2e2 100%); 
background-image: -o-linear-gradient(top, #dbdbdb 0%, #e2e2e2 100%); background-image: -webkit-gradient(linear, left top, left bottom, from(#dbdbdb), to(#e2e2e2)); background-image: linear-gradient(to bottom, #dbdbdb 0%, #e2e2e2 100%); filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffdbdbdb', endColorstr='#ffe2e2e2', GradientType=0); background-repeat: repeat-x; -webkit-box-shadow: inset 0 3px 9px rgba(0, 0, 0, .075); box-shadow: inset 0 3px 9px rgba(0, 0, 0, .075); } .navbar-brand, .navbar-nav > li > a { text-shadow: 0 1px 0 rgba(255, 255, 255, .25); } .navbar-inverse { background-image: -webkit-linear-gradient(top, #3c3c3c 0%, #222 100%); background-image: -o-linear-gradient(top, #3c3c3c 0%, #222 100%); background-image: -webkit-gradient(linear, left top, left bottom, from(#3c3c3c), to(#222)); background-image: linear-gradient(to bottom, #3c3c3c 0%, #222 100%); filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff3c3c3c', endColorstr='#ff222222', GradientType=0); filter: progid:DXImageTransform.Microsoft.gradient(enabled = false); background-repeat: repeat-x; border-radius: 4px; } .navbar-inverse .navbar-nav > .open > a, .navbar-inverse .navbar-nav > .active > a { background-image: -webkit-linear-gradient(top, #080808 0%, #0f0f0f 100%); background-image: -o-linear-gradient(top, #080808 0%, #0f0f0f 100%); background-image: -webkit-gradient(linear, left top, left bottom, from(#080808), to(#0f0f0f)); background-image: linear-gradient(to bottom, #080808 0%, #0f0f0f 100%); filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff080808', endColorstr='#ff0f0f0f', GradientType=0); background-repeat: repeat-x; -webkit-box-shadow: inset 0 3px 9px rgba(0, 0, 0, .25); box-shadow: inset 0 3px 9px rgba(0, 0, 0, .25); } .navbar-inverse .navbar-brand, .navbar-inverse .navbar-nav > li > a { text-shadow: 0 -1px 0 rgba(0, 0, 0, .25); } .navbar-static-top, .navbar-fixed-top, .navbar-fixed-bottom { border-radius: 0; } @media 
(max-width: 767px) { .navbar .navbar-nav .open .dropdown-menu > .active > a, .navbar .navbar-nav .open .dropdown-menu > .active > a:hover, .navbar .navbar-nav .open .dropdown-menu > .active > a:focus { color: #fff; background-image: -webkit-linear-gradient(top, #337ab7 0%, #2e6da4 100%); background-image: -o-linear-gradient(top, #337ab7 0%, #2e6da4 100%); background-image: -webkit-gradient(linear, left top, left bottom, from(#337ab7), to(#2e6da4)); background-image: linear-gradient(to bottom, #337ab7 0%, #2e6da4 100%); filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff337ab7', endColorstr='#ff2e6da4', GradientType=0); background-repeat: repeat-x; } } .alert { text-shadow: 0 1px 0 rgba(255, 255, 255, .2); -webkit-box-shadow: inset 0 1px 0 rgba(255, 255, 255, .25), 0 1px 2px rgba(0, 0, 0, .05); box-shadow: inset 0 1px 0 rgba(255, 255, 255, .25), 0 1px 2px rgba(0, 0, 0, .05); } .alert-success { background-image: -webkit-linear-gradient(top, #dff0d8 0%, #c8e5bc 100%); background-image: -o-linear-gradient(top, #dff0d8 0%, #c8e5bc 100%); background-image: -webkit-gradient(linear, left top, left bottom, from(#dff0d8), to(#c8e5bc)); background-image: linear-gradient(to bottom, #dff0d8 0%, #c8e5bc 100%); filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffdff0d8', endColorstr='#ffc8e5bc', GradientType=0); background-repeat: repeat-x; border-color: #b2dba1; } .alert-info { background-image: -webkit-linear-gradient(top, #d9edf7 0%, #b9def0 100%); background-image: -o-linear-gradient(top, #d9edf7 0%, #b9def0 100%); background-image: -webkit-gradient(linear, left top, left bottom, from(#d9edf7), to(#b9def0)); background-image: linear-gradient(to bottom, #d9edf7 0%, #b9def0 100%); filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffd9edf7', endColorstr='#ffb9def0', GradientType=0); background-repeat: repeat-x; border-color: #9acfea; } .alert-warning { background-image: -webkit-linear-gradient(top, #fcf8e3 0%, #f8efc0 100%); 
background-image: -o-linear-gradient(top, #fcf8e3 0%, #f8efc0 100%); background-image: -webkit-gradient(linear, left top, left bottom, from(#fcf8e3), to(#f8efc0)); background-image: linear-gradient(to bottom, #fcf8e3 0%, #f8efc0 100%); filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fffcf8e3', endColorstr='#fff8efc0', GradientType=0); background-repeat: repeat-x; border-color: #f5e79e; } .alert-danger { background-image: -webkit-linear-gradient(top, #f2dede 0%, #e7c3c3 100%); background-image: -o-linear-gradient(top, #f2dede 0%, #e7c3c3 100%); background-image: -webkit-gradient(linear, left top, left bottom, from(#f2dede), to(#e7c3c3)); background-image: linear-gradient(to bottom, #f2dede 0%, #e7c3c3 100%); filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff2dede', endColorstr='#ffe7c3c3', GradientType=0); background-repeat: repeat-x; border-color: #dca7a7; } .progress { background-image: -webkit-linear-gradient(top, #ebebeb 0%, #f5f5f5 100%); background-image: -o-linear-gradient(top, #ebebeb 0%, #f5f5f5 100%); background-image: -webkit-gradient(linear, left top, left bottom, from(#ebebeb), to(#f5f5f5)); background-image: linear-gradient(to bottom, #ebebeb 0%, #f5f5f5 100%); filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffebebeb', endColorstr='#fff5f5f5', GradientType=0); background-repeat: repeat-x; } .progress-bar { background-image: -webkit-linear-gradient(top, #337ab7 0%, #286090 100%); background-image: -o-linear-gradient(top, #337ab7 0%, #286090 100%); background-image: -webkit-gradient(linear, left top, left bottom, from(#337ab7), to(#286090)); background-image: linear-gradient(to bottom, #337ab7 0%, #286090 100%); filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff337ab7', endColorstr='#ff286090', GradientType=0); background-repeat: repeat-x; } .progress-bar-success { background-image: -webkit-linear-gradient(top, #5cb85c 0%, #449d44 100%); background-image: -o-linear-gradient(top, 
#5cb85c 0%, #449d44 100%); background-image: -webkit-gradient(linear, left top, left bottom, from(#5cb85c), to(#449d44)); background-image: linear-gradient(to bottom, #5cb85c 0%, #449d44 100%); filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff5cb85c', endColorstr='#ff449d44', GradientType=0); background-repeat: repeat-x; } .progress-bar-info { background-image: -webkit-linear-gradient(top, #5bc0de 0%, #31b0d5 100%); background-image: -o-linear-gradient(top, #5bc0de 0%, #31b0d5 100%); background-image: -webkit-gradient(linear, left top, left bottom, from(#5bc0de), to(#31b0d5)); background-image: linear-gradient(to bottom, #5bc0de 0%, #31b0d5 100%); filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff5bc0de', endColorstr='#ff31b0d5', GradientType=0); background-repeat: repeat-x; } .progress-bar-warning { background-image: -webkit-linear-gradient(top, #f0ad4e 0%, #ec971f 100%); background-image: -o-linear-gradient(top, #f0ad4e 0%, #ec971f 100%); background-image: -webkit-gradient(linear, left top, left bottom, from(#f0ad4e), to(#ec971f)); background-image: linear-gradient(to bottom, #f0ad4e 0%, #ec971f 100%); filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff0ad4e', endColorstr='#ffec971f', GradientType=0); background-repeat: repeat-x; } .progress-bar-danger { background-image: -webkit-linear-gradient(top, #d9534f 0%, #c9302c 100%); background-image: -o-linear-gradient(top, #d9534f 0%, #c9302c 100%); background-image: -webkit-gradient(linear, left top, left bottom, from(#d9534f), to(#c9302c)); background-image: linear-gradient(to bottom, #d9534f 0%, #c9302c 100%); filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffd9534f', endColorstr='#ffc9302c', GradientType=0); background-repeat: repeat-x; } .progress-bar-striped { background-image: -webkit-linear-gradient(45deg, rgba(255, 255, 255, .15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, .15) 50%, rgba(255, 255, 255, .15) 75%, 
transparent 75%, transparent); background-image: -o-linear-gradient(45deg, rgba(255, 255, 255, .15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, .15) 50%, rgba(255, 255, 255, .15) 75%, transparent 75%, transparent); background-image: linear-gradient(45deg, rgba(255, 255, 255, .15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, .15) 50%, rgba(255, 255, 255, .15) 75%, transparent 75%, transparent); } .list-group { border-radius: 4px; -webkit-box-shadow: 0 1px 2px rgba(0, 0, 0, .075); box-shadow: 0 1px 2px rgba(0, 0, 0, .075); } .list-group-item.active, .list-group-item.active:hover, .list-group-item.active:focus { text-shadow: 0 -1px 0 #286090; background-image: -webkit-linear-gradient(top, #337ab7 0%, #2b669a 100%); background-image: -o-linear-gradient(top, #337ab7 0%, #2b669a 100%); background-image: -webkit-gradient(linear, left top, left bottom, from(#337ab7), to(#2b669a)); background-image: linear-gradient(to bottom, #337ab7 0%, #2b669a 100%); filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff337ab7', endColorstr='#ff2b669a', GradientType=0); background-repeat: repeat-x; border-color: #2b669a; } .list-group-item.active .badge, .list-group-item.active:hover .badge, .list-group-item.active:focus .badge { text-shadow: none; } .panel { -webkit-box-shadow: 0 1px 2px rgba(0, 0, 0, .05); box-shadow: 0 1px 2px rgba(0, 0, 0, .05); } .panel-default > .panel-heading { background-image: -webkit-linear-gradient(top, #f5f5f5 0%, #e8e8e8 100%); background-image: -o-linear-gradient(top, #f5f5f5 0%, #e8e8e8 100%); background-image: -webkit-gradient(linear, left top, left bottom, from(#f5f5f5), to(#e8e8e8)); background-image: linear-gradient(to bottom, #f5f5f5 0%, #e8e8e8 100%); filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff5f5f5', endColorstr='#ffe8e8e8', GradientType=0); background-repeat: repeat-x; } .panel-primary > .panel-heading { background-image: -webkit-linear-gradient(top, #337ab7 0%, #2e6da4 100%); 
background-image: -o-linear-gradient(top, #337ab7 0%, #2e6da4 100%); background-image: -webkit-gradient(linear, left top, left bottom, from(#337ab7), to(#2e6da4)); background-image: linear-gradient(to bottom, #337ab7 0%, #2e6da4 100%); filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff337ab7', endColorstr='#ff2e6da4', GradientType=0); background-repeat: repeat-x; } .panel-success > .panel-heading { background-image: -webkit-linear-gradient(top, #dff0d8 0%, #d0e9c6 100%); background-image: -o-linear-gradient(top, #dff0d8 0%, #d0e9c6 100%); background-image: -webkit-gradient(linear, left top, left bottom, from(#dff0d8), to(#d0e9c6)); background-image: linear-gradient(to bottom, #dff0d8 0%, #d0e9c6 100%); filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffdff0d8', endColorstr='#ffd0e9c6', GradientType=0); background-repeat: repeat-x; } .panel-info > .panel-heading { background-image: -webkit-linear-gradient(top, #d9edf7 0%, #c4e3f3 100%); background-image: -o-linear-gradient(top, #d9edf7 0%, #c4e3f3 100%); background-image: -webkit-gradient(linear, left top, left bottom, from(#d9edf7), to(#c4e3f3)); background-image: linear-gradient(to bottom, #d9edf7 0%, #c4e3f3 100%); filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffd9edf7', endColorstr='#ffc4e3f3', GradientType=0); background-repeat: repeat-x; } .panel-warning > .panel-heading { background-image: -webkit-linear-gradient(top, #fcf8e3 0%, #faf2cc 100%); background-image: -o-linear-gradient(top, #fcf8e3 0%, #faf2cc 100%); background-image: -webkit-gradient(linear, left top, left bottom, from(#fcf8e3), to(#faf2cc)); background-image: linear-gradient(to bottom, #fcf8e3 0%, #faf2cc 100%); filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fffcf8e3', endColorstr='#fffaf2cc', GradientType=0); background-repeat: repeat-x; } .panel-danger > .panel-heading { background-image: -webkit-linear-gradient(top, #f2dede 0%, #ebcccc 100%); background-image: 
-o-linear-gradient(top, #f2dede 0%, #ebcccc 100%); background-image: -webkit-gradient(linear, left top, left bottom, from(#f2dede), to(#ebcccc)); background-image: linear-gradient(to bottom, #f2dede 0%, #ebcccc 100%); filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff2dede', endColorstr='#ffebcccc', GradientType=0); background-repeat: repeat-x; } .well { background-image: -webkit-linear-gradient(top, #e8e8e8 0%, #f5f5f5 100%); background-image: -o-linear-gradient(top, #e8e8e8 0%, #f5f5f5 100%); background-image: -webkit-gradient(linear, left top, left bottom, from(#e8e8e8), to(#f5f5f5)); background-image: linear-gradient(to bottom, #e8e8e8 0%, #f5f5f5 100%); filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffe8e8e8', endColorstr='#fff5f5f5', GradientType=0); background-repeat: repeat-x; border-color: #dcdcdc; -webkit-box-shadow: inset 0 1px 3px rgba(0, 0, 0, .05), 0 1px 0 rgba(255, 255, 255, .1); box-shadow: inset 0 1px 3px rgba(0, 0, 0, .05), 0 1px 0 rgba(255, 255, 255, .1); } /*# sourceMappingURL=bootstrap-theme.css.map */

Larkkkkkkk


title: 文字篇1
date: 2019-09-26 18:49:31
tags: 文字篇


#                   定几个小目标!

1.Národní cena

2.Anglická úroveň čtyři

3.Absolvovaný student

4.Stovky provinciálních národních závodů

#                                 moc nízko nad.

py2neo

python和neo4j交互

前提

1
2
3
4
1.使用py2neo==4.3.0
2.代码导入:
from py2neo import Graph
from py2neo import Graph,Node,Relationship

创建图对象

1
2
3

# 1. Create the graph object: connect to a Neo4j server on localhost:7474 over HTTP.
# NOTE(review): credentials are hard-coded — acceptable in study notes, not in real code.
graph = Graph('http://localhost:7474', auth=("neo4j", "123456"))

创建数据对象

1
2
3
4
5
6

# 2. Create data objects: two nodes and one relationship, then persist them.
a=Node('person',name='Alice') # node a with label 'person'
b=Node('person',name='Bob') # node b with label 'person'
ab=Relationship(a,'KNOWS',b) # relationship (a)-[KNOWS]->(b)
graph.create(ab) # push the subgraph to the server (uses `graph` created above)

知识图谱

知识图谱

知识图谱定义

知识图谱的定义:

知识图谱的具体:

知识图谱相关领域

整体概况:

CQL语句

1
2


1
2


1
2


1
2


生物信息学

生物信息学概述

所需资料

1
2
3
1.视频:
山东大学生物信息学:https://www.bilibili.com/video/BV13t411372E/?spm_id_from=333.999.0.0
2.百度网盘(课件):https://pan.baidu.com/s/1WyBpOO2hs3qRJc7yipJW1A 提取码: 6esr

生物信息学研究对象

生物数据库分类

文献数据库(PubMed)

1
1.地址:https://pubmed.ncbi.nlm.nih.gov/

一级核酸数据库(GenBank)

原核生物(大肠杆菌)dUTPase的DNA序列信息(X01714)

详细介绍1

详细介绍2

详细介绍3

1
2
3
1.浏览地址:https://www.ncbi.nlm.nih.gov/nuccore/X01714
2.详细介绍:如图所示
3.核酸序列: 页面GenBank:X01714.1下面有一行 FASTA(文本形式) Graphics(图形可视化)

真核生物(人)dUTPase的成熟mRNA的序列信息(U90223)

详细介绍

1
2
3
1.浏览地址:https://www.ncbi.nlm.nih.gov/nuccore/U90223
2.详细介绍:如图所示
3.核酸序列: 页面GenBank:U90223.1下面有一行 FASTA(文本形式) Graphics(图形可视化)

真核生物(人)dUTPase的DNA的序列信息(AH005568)

详细介绍

1
2
3
1.浏览地址:https://www.ncbi.nlm.nih.gov/nuccore/AH005568
2.详细介绍:如图所示
3.核酸序列: 页面GenBank:AH005568下面有一行 FASTA(文本形式) Graphics(图形可视化)

一级核酸数据库(基因组数据库)

人基因组数据库(Ensemble)

1
2
3
4
5
1.地址:https://asia.ensembl.org/Homo_sapiens/Location/Genome
2.详细选择: 左下角图标human点击进入
3.知识点:
3.1 人的基因组有33亿个碱基分布在23个染色体上
3.2 目前已经获得人的全基因组序列

微生物宏基因组数据库(JCVI)

1
1.地址:https://www.jcvi.org/

二级核酸数据库

1
2
3
4
5
1.RefSeq数据库(参考序列数据库):通过自动及人工精选出的非冗余数据库[基因组序列、转录序列、蛋白质序列]
2.dbEST数据库(表达序列数据库):来源于不同物种的表达序列标签(EST)
3.Gene数据库:为用户提供基因序列注释和检索服务
4.ncRNAdb(非编码RNA数据库):提供非编码RNA的序列和功能信息(99种细菌、古细菌、真核生物的3万多条序列)
5.miRBase:主要存放已发表的microRNA序列和注释(可以分析microRNA在基因组中的定位和挖掘microRNA序列间的关系)

一级蛋白质序列数据库(UniProt)

三大数据库:

三大数据库合并:

UniProt解读

  1. 查找并搜索蛋白质数据库

  1. 点击第一条P33316查看内部情况

一级蛋白质结构数据库(PDB)

蛋白质的结构

PDB解读

  1. PDB网页搜索苏教授的duTPase蛋白质

  1. 浏览基本信息(点击Download Files–>PDB format)

  1. 基本信息-第一部分(基本信息部分)

  1. 基本信息-第二部分(一级结构信息部分)

  1. 基本信息-第三部分(二级结构信息部分)

  1. 基本信息-实验参数部分

  1. 基本信息-3D坐标部分

二级蛋白质数据库

二级蛋白质数据库分类

  1. CATH数据库

  1. SCOP2数据库

1
2
3
1.Pfam数据库:蛋白质结构域家族的集合
2.CATH数据库:数据库四种结构分类层次的首字母(结构域)
3.SCOP2数据库:详细描述蛋白质在结构、进化事件、功能类型三个方面的关系(将SCOP中仅基于蛋白质的树状等级分类系统发展成为单向非循环网状分类系统)

专用数据库

KEGG

1
2
1.京都基因与基因组百科全书:关于基因、蛋白质、生化反应、通路的综合生物信息数据库(多个字库构成)
2.分类:KEGG下有很多分类

OMIM

1
2
1.人类孟德尔遗传文献:为临床医生和科研人员提供了关于遗传病相关技术
2.地址:https://www.ncbi.nlm.nih.gov/omim

序列

序列概念

1
2
1.蛋白质序列:由20个不同的字母(氨基酸)排列组合而成
2.核酸序列:由4个不同的字母(碱基)排列组合而成 --> DNA序列和RNA序列

序列相似性(序列一致度/序列相似度)

1
2
1.序列一致度:两个序列长度相同,那么他们一致度的定义为他们对应位置上相同的残基的数目/总长度的百分比
2.序列相似度:两个序列长度相同,那么他们相似度的定义为他们对应位置上相同+相似残基的数目/总长度的百分比

替换记分矩阵(反映残基两两相似的量化关系)

1
2
3
4
5
6
7
8
9
10
11
1.DNA替换记分矩阵:
1.1 等价矩阵
1.2 转换-颠换矩阵
1.3 BLAST矩阵

2.蛋白质替换记分矩阵:
2.1 等价矩阵
2.2 PAM矩阵
2.3 BLOSUM矩阵
2.4 遗传密码矩阵
2.5 疏水矩阵

序列比对

1
2
3
4
1.打点法
2.序列比对法
2.1 双序列全局比对法:Needleman-Wunsch算法
2.2 双序列局部比对法:Smith-Waterman算法

双序列全局比对法(Needleman-Wunsch算法)

双序列局部比对法(Smith-Waterman算法)

保守区域

系统发生树

基于距离的UPGMA法

1
2
1.根据序列两两间的距离的远近构建系统发生树
2.根据一步步合成新的矩阵(A-->AB-->CD-->ABCD)然后得到最终的系统发生树

综述提炼

参考方向

1
2
3
1.分析:偏向生物更多,需要从生物信息本质去挖掘和预测
2.算法:偏向计算机更多,需要代码能力更强(前景更大)
3.模型:用深度学习跑(DNN/RNN/CNN/MNN)

深度学习在生物信息学的应用

组学

1
2
3
4
1.蛋白质结构
2.基因表达调控
3.蛋白质分类
4.异常分类

成像

###

动手学深度学习

预备知识

数据操作

数据操作实现

读取和存储数据

1
2
3
4
5
6
7
8
9
10
11
12
13
import torch

# Demo: tensor creation and shape inspection.
x = torch.arange(0, 12)  # row vector holding 0..11
print(x)
print(x.shape)    # the shape attribute reports the tensor's dimensions
print(x.numel())  # total element count (product of all shape entries)
# reshape changes the layout, not the number or values of the elements;
# one dimension may be given as -1 and is then inferred automatically,
# e.g. x.reshape(-1, 4) or x.reshape(3, -1).
print(x.reshape(3, 4))
# constant-filled tensors
print(torch.zeros((2, 3, 4)))  # all zeros
print(torch.ones((2, 3, 4)))   # all ones
# samples drawn from the standard normal distribution (mean 0, std 1)
print(torch.randn((3, 4)))

运算符

1
2
3
4
5
6
7
8
9
import torch

# Elementwise arithmetic: +, -, *, / and ** all act per element.
x = torch.tensor([1.0, 2, 4, 8])
y = torch.tensor([2, 2, 2, 2])
for result in (x + y, x - y, x * y, x / y, x ** y):
    print(result)

广播机制

1
2
3
4
5
6
7
8
9
10
11
12
# 1. Broadcasting conceptually copies elements to expand one or both arrays
#    until the two tensors share the same shape.
# 2. The elementwise operation is then applied to the expanded arrays.

import torch

a = torch.arange(3).reshape(3, 1)  # 3x1 column matrix
b = torch.arange(2).reshape(1, 2)  # 1x2 row matrix
print(a)
print(b)
# a + b: the shapes do not match, so both broadcast to a larger 3x2 matrix —
# a's single column is replicated across columns, b's single row across rows.
print(a + b)

索引和切片

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
import torch

# Indexing and slicing on a 3x4 tensor.
x = torch.arange(12).reshape(3, 4)
print(x)

print(x[-1])   # last row
print(x[1:3])  # rows with index 1 and 2 (the second and third rows)

x[1, 2] = 9      # write a single element: row 1, column 2
print(x)
x[0:2, :] = 111  # write a whole region: every column of the first two rows
print(x)

节省内存(创建一样大的0矩阵)

1
2
3
4
5
6
7
8
9
10
11
12
13
import torch

# Out-of-place vs. in-place assignment, observed via object identity.
Y = torch.arange(12).reshape(3, 4)
X = torch.arange(12).reshape(3, 4)
before = id(Y)
Y = Y + X  # allocates a fresh tensor and rebinds the name Y
print(id(Y) == before)  # False: Y now refers to new memory

# In-place update: preallocate a zero tensor shaped like Y, then fill it.
Z = torch.zeros_like(Y)
print(id(Z))
Z[:] = X + Y  # slice assignment writes into Z's existing storage
print(id(Z))  # unchanged: same object as before

深度学习框架定义的张量->Numpy张量(ndarray)

1
2
3
4
5
6
7
8
9
10
11
12
13
14
import torch
import numpy as np

# Convert between framework tensors and NumPy ndarrays.
X = torch.arange(12, dtype=torch.float32).reshape(3, 4)
A = X.numpy()        # view the tensor as an ndarray
B = torch.tensor(A)  # copy the ndarray back into a new tensor
print(type(A))  # numpy.ndarray
print(type(B))  # torch.Tensor

# A size-1 tensor converts to a plain Python scalar via item() or the
# built-in float()/int() constructors.
a = torch.tensor([3.5])
print(a)
print(a.item())
print(float(a))
print(int(a))

数据预处理(Pandas)

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34

import torch
import numpy as np
import pandas as pd
import os

# 1. Create the dataset: write a tiny CSV with missing values (NA), then read it back.
os.makedirs(os.path.join('..', 'data'), exist_ok=True)
data_file = os.path.join('..', 'data', 'house_tiny.csv')  # ../data/house_tiny.csv
with open(data_file, 'w') as f:
    # BUG FIX: these writes must be indented inside the `with` block,
    # otherwise the snippet is an IndentationError as written.
    f.write('NumRooms,Alley,Price\n')  # column names
    f.write('NA,Pave,127500\n')        # each row is one data sample
    f.write('2,NA,106000\n')
    f.write('4,NA,178100\n')
    f.write('NA,NA,140000\n')
data = pd.read_csv(data_file)
print(data)
print()

# 2. Handle missing values (imputation vs. deletion).
inputs = data.iloc[:, 0:2]  # first two columns -> inputs, last column -> outputs
outputs = data.iloc[:, 2]
# Fill numeric NaNs with the column mean. numeric_only=True is required because
# 'Alley' is a string column and pandas >= 2 raises when averaging it.
inputs = inputs.fillna(inputs.mean(numeric_only=True))
print(inputs)
print()

# Categorical/discrete values: 'Alley' holds 'Pave' and NaN, so get_dummies
# splits it into Alley_Pave / Alley_nan indicator columns (dummy_na=True
# gives missing entries their own column).
inputs = pd.get_dummies(inputs, dummy_na=True)
print(inputs)

# 3. Convert to tensors. Cast to float first: get_dummies may emit bool
# columns, and torch.tensor cannot consume mixed/object ndarrays.
X = torch.tensor(inputs.to_numpy(dtype=float))
y = torch.tensor(outputs.to_numpy(dtype=float))
print(X)
print(y)

线性代数

标量/向量/矩阵/张量

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37

import torch
import numpy as np
import pandas as pd
import os

# 1. Scalars: zero-order tensors; ordinary arithmetic applies.
x = torch.tensor(3.0)
y = torch.tensor(2.0)
print("x+y:", x + y)
print("x-y:", x - y)
print("x*y:", x * y)
print("x/y:", x / y)
print("x**y:", x ** y)
print()

# 2. Vectors ([scalar_1, scalar_2, ..., scalar_N]): first-order tensors.
x = torch.arange(4)
print("x:", x)
print()

# 3. Matrices: second-order tensors.
A = torch.arange(20).reshape(5, 4)  # 5 rows, 4 columns
print("A:", A)
print("A的转置:", A.T)  # transpose
# Adding two matrices of identical shape works elementwise.
A = torch.arange(20, dtype=torch.float32).reshape(5, 4)
B = A.clone()
print("A:", A)
print("A+B:", A + B)
print()

# 4. Higher-order tensors, e.g. images as n-d arrays
#    (height / width / channel axes for R, G, B).
X = torch.arange(24).reshape(2, 3, 4)
print("X:", X)
print()

降维

降维求和

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33

import torch
import numpy as np
import pandas as pd
import os

# Summing a tensor reduces its rank ("dimensionality reduction").
x = torch.arange(4, dtype=torch.float32)  # 0. 1. 2. 3.
print("x:", x)
print("x的总和:", x.sum())
print()

A = torch.arange(20, dtype=torch.float32).reshape(5, 4)
print("A:", A)
print("A的形状:", A.shape)
print("A的总和:", A.sum())
print()

# Reduce along axis 0: collapse the rows, leaving one sum per column.
A_sum_axis0 = A.sum(axis=0)
print("A按照轴0:", A_sum_axis0)
print("A按照轴0的形状:", A_sum_axis0.shape)
print()
# Reduce along axis 1: collapse the columns, leaving one sum per row.
A_sum_axis1 = A.sum(axis=1)
print("A按照轴1:", A_sum_axis1)
print("A按照轴1的形状:", A_sum_axis1.shape)
print()
# Reduce along both axes: a single scalar total.
A_sum_axis01 = A.sum(axis=[0, 1])
print("A按照轴01:", A_sum_axis01)
print("A按照轴01的形状:", A_sum_axis01.shape)
print()
print()

非降维求和(设置keepdims=True)

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18

import torch
import numpy as np
import pandas as pd
import os

A = torch.arange(20, dtype=torch.float32).reshape(5, 4)
print("A:", A)
print("A的形状:", A.shape)
print("A的总和:", A.sum())
print()

# Non-reducing sum: keepdim=True keeps the reduced axis with size 1, shape (5, 1).
# fixed: keepdim is the canonical PyTorch kwarg ('keepdims' is only a numpy-compat alias)
sum_A = A.sum(axis=1, keepdim=True)
print(sum_A)

# Cumulative sum along axis 0: row i holds the sum of rows 0..i.
print(A.cumsum(axis=0))

点积(dot)

1
2
3
4
5
6
7
8
9
10
11

import torch
import numpy as np
import pandas as pd
import os

# Dot product of two vectors: the sum of element-wise products.
x = torch.arange(4, dtype=torch.float32)
y = torch.ones(4, dtype=torch.float32)
print("x:", x)
print("y:", y)  # fixed: the label said "x:" although y is printed
print(torch.dot(x, y))  # 0*1 + 1*1 + 2*1 + 3*1 = 6

矩阵-向量积(mv)

1
2
3
4
5
6
7
8
9
10
11

import torch
import numpy as np
import pandas as pd
import os

# Matrix-vector product: torch.mv(A, x) returns a vector whose i-th entry
# is the dot product of row i of A with x.
x = torch.arange(4)
A = torch.arange(20).reshape(5, 4)
result = torch.mv(A, x)
print(x)
print(A)
print(result)

矩阵-矩阵乘法(mm)

1
2
3
4
5
6
7
8
9
10
11

import torch
import numpy as np
import pandas as pd
import os

# Matrix-matrix product: (5x4) @ (4x5) -> (5x5).
A = torch.arange(20).reshape(5, 4)
B = torch.arange(20).reshape(4, 5)
product = torch.mm(A, B)
print(A)
print(B)
print(product)

范数

1
2
3
4
5
6
7
8
9
10
11
12
13

import torch
import numpy as np
import pandas as pd
import os

# L2 norm (Euclidean length): sqrt of the sum of squares.
u = torch.tensor([3.0, -4.0])
l2 = torch.norm(u)
print(l2)  # 5.0

# L1 norm: sum of absolute values.
print(torch.abs(u).sum())  # 3.0 + 4.0 = 7.0
# Frobenius norm of a 4x9 matrix of ones: sqrt(36) = 6.0.
print(torch.norm(torch.ones(4, 9)))

微积分

导数

1
2
1.概念:
2.定义:

爬虫基础

了解网页结构

1
2
3
4
5
6
7
1.网页构成:
1.1 html
1.2 css
1.3 JavaScript
2.html介绍:
2.1 header部分:看到网页的元信息(比如像title标题)
2.2 body部分:可以看到网页的内容(p/a/h1等标签)

用到的网页html代码

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16

<!DOCTYPE html>
<html lang="cn">
<head>
<meta charset="UTF-8">
<title>Scraping tutorial 1 | 莫烦Python</title>
<link rel="icon" href="https://morvanzhou.github.io/static/img/description/tab_icon.png">
</head>
<body>
<h1>爬虫测试1</h1>
<p>
这是一个在 <a href="https://morvanzhou.github.io/">莫烦Python</a>
<a href="https://morvanzhou.github.io/tutorials/data-manipulation/scraping/">爬虫教程</a> 中的简单测试.
</p>
</body>
</html>

匹配网页内容(两种方式)

正则表达式(regex库)

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
from urllib.request import urlopen  # stdlib HTTP fetch
import re  # regular expressions for text matching
# Download the demo page and decode the response bytes as UTF-8.
html=urlopen("https://morvanzhou.github.io/static/scraping/basic-structure.html").read().decode('utf-8')
print(html)

#1. Simple page matching with regular expressions
##1.1 extract the page <title>
res=re.findall(r"<title>(.+?)</title>",html)
print("\n文章的标题是: ",res[0])
##1.2 extract the <p> paragraph
res=re.findall(r"<p>(.*?)</p>",html,flags=re.DOTALL) # re.DOTALL lets '.' also match newlines/tabs
print("\n文章的中间段落是: ",res[0])
##1.3 extract every href attribute (all links)
res=re.findall(r'href="(.*?)"',html)
print("\n所有的链接:",res)

BeautifulSoup(bs4库的BeautifulSoup)

1
2
1.概念:是一个可以从HTML/XML文件中提取数据的Python库.它能够通过你喜欢的转换器实现惯用的文档导航,查找,修改文档的方式.Beautiful Soup会帮你节省数小时甚至数天的工作时间.
2.官网:https://www.crummy.com/software/BeautifulSoup/bs4/doc.zh/

解析网页:基础(按照标签名进行匹配)

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
#和刚才正则表达式对比:
from urllib.request import urlopen #python自带的打开
from bs4 import BeautifulSoup #bs4里面的
html=urlopen("https://morvanzhou.github.io/static/scraping/basic-structure.html").read().decode('utf-8')
#print(html)
#2.高级页面匹配用BeautifulSoup
soup=BeautifulSoup(html,features='lxml') #将刚才获取的地址 --> lxml格式保存
#输出soup的h标题
print(soup.h1)
#输出soup的p标签
print(soup.p)
#输出soup的a标签(特别多的话可以用find_all()找到所有选项)
all_href=soup.find_all('a') #将所有a找到 -- 但是里面会有很多其他杂质(<a href="xxx">爬虫教程</a>])
print(all_href)
for i in all_href:
print("a里面的地址:",i['href'])

解析网页:CSS(按照css的class进行匹配)

要准备的html网页:

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
from urllib.request import urlopen  # stdlib HTTP fetch
import re  # regular expressions
from bs4 import BeautifulSoup  # HTML parser from bs4
html = urlopen("https://mofanpy.com/static/scraping/list.html").read().decode('utf-8')
#print(html)
soup = BeautifulSoup(html, features='lxml')

# 1. Every <li> entry with class="month".
month = soup.find_all('li', {"class": "month"})
# fixed: both loop bodies had lost their indentation (IndentationError as pasted)
for m in month:
    print(m.get_text())  # text content of the <li>

# 2. The <ul class="jan"> block, then every nested <li> inside it.
jan = soup.find('ul', {"class": 'jan'})
d_jan = jan.find_all('li')
for d in d_jan:
    print(d.get_text())

解析网页:正则表达式

1
2
3
4
5
6
7
8
9
10
11
12
13
from urllib.request import urlopen  # stdlib HTTP fetch
import re  # regular expressions
from bs4 import BeautifulSoup  # HTML parser from bs4

# Fetch the page.
html = urlopen("https://mofanpy.com/static/scraping/table.html").read().decode('utf-8')
# Parse it into a soup object.
soup = BeautifulSoup(html, features='lxml')
# All <img> tags whose src ends in .jpg.
# fixed: raw string r'...' — '\.' is an invalid escape in a normal string;
# in the pattern, '\.' matches a literal dot
img_links = soup.find_all("img", {"src": re.compile(r'.*?\.jpg')})
# fixed: the loop body had lost its indentation (IndentationError as pasted)
for link in img_links:
    print(link['src'])

正则表达式

正则表达式匹配流程:

1
2
3
4
5
6
7
8
9
10
11

import re  # regular expressions for text matching

pattern1 = "cat"
pattern2 = "bird"
string = "dog runs to cat"
# Plain substring membership tests.
print(pattern1 in string)  # True
print(pattern2 in string)  # False
# Regex searches: a Match object (with span and matched text) on success, None otherwise.
print(re.search(pattern1, string))
print(re.search(pattern2, string))

灵活匹配(pattern)

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
import re  # regular expressions for text matching

pattern1 = "cat"
pattern2 = "bird"
string = "dog runs to cat"

# r"..." marks a raw string, the conventional way to write regex patterns.
## 1. r[au]n matches ran / run
ptn = r"r[au]n"
print(re.search(ptn, string))
## 2. r[A-Z]n matches rAn ... rZn
print(re.search(r"r[A-Z]n", string))
## 3. r[a-z]n matches ran ... rzn
print(re.search(r"r[a-z]n", string))
## 4. r[0-9]n matches r0n ... r9n
# fixed: the pattern had a stray ']' (r"r[0-9]n]") which demanded a literal ']' after the n
print(re.search(r"r[0-9]n", string))
## 5. r[0-9a-z]n matches a digit or any lowercase letter between r and n
print(re.search(r"r[0-9a-z]n", string))

类型匹配(好多设定好的)

特殊的匹配类型:

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
import re  # regular expressions for text matching

# \d : any digit
print(re.search(r"r\dn", "run r4n"))  # matches r4n
# \D : any non-digit
print(re.search(r"r\Dn", "run r4n"))  # matches run
# \s : any whitespace (\t \n \r \f \v)
print(re.search(r"r\sn", "r\nn r4n"))  # matches r\nn
# \S : any non-whitespace
print(re.search(r"r\Sn", "r\nn r4n"))  # matches r4n
# \w : word character [a-zA-Z0-9_]
print(re.search(r"r\wn", "r\nn r4n"))  # matches r4n
# \W : non-word character
print(re.search(r"r\Wn", "r\nn r4n"))  # matches r\nn
# \b : word boundary (empty match at a word edge)
print(re.search(r"r\bn", "dog runs to cat"))  # no match: 'r'-'n' inside "runs" has no boundary
# \B : empty match NOT at a word edge
print(re.search(r"\B runs \B", "dog runs to cat"))  # matches ' runs '
# \\ in the pattern matches one literal backslash
# fixed: "runs\ to me" used the invalid escape '\ ' (SyntaxWarning today, a future
# SyntaxError); "runs\\ to me" denotes exactly the same runtime string explicitly
print(re.search(r"runs\\", "runs\\ to me"))  # matches runs\
# . : any character except \n
print(re.search(r"r.n", "r[ns to me"))  # matches r[n
# ^ : anchors the match at the start
print(re.search(r"^dog", "dog runs to cat"))  # matches dog
# $ : anchors the match at the end
print(re.search(r"cat$", "dog runs to cat"))  # matches cat
# (day)? : optional group, so both Monday and Mon match
print(re.search(r"Mon(day)?", "Monday"))  # matches Monday
print(re.search(r"Mon(day)?", "Mon"))  # matches Mon

# Multi-line strings: by default ^ only matches at the very start of the string.
string="""
dog runs to cat.
I run to dog.
"""
# no match without the flag
print(re.search(r"^I",string))
# re.M / re.MULTILINE makes ^ match at the start of every line
print(re.search(r"^I",string,flags=re.M))

重复匹配(重复出现)

重复匹配分类:

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
import re  # regular expressions for text matching

# '*' : zero or more repetitions of the preceding token.
for s in ("a", "abbbbb", "abababab"):
    print(re.search(r"ab*", s))
print()

# '+' : one or more repetitions of the preceding token.
for s in ("a", "ab", "abb"):
    print(re.search(r"ab+", s))
print()

# '{n,m}' : between n and m repetitions of the preceding token.
for s in ("a", "ab", "abb", "abbbb", "ababab"):
    print(re.search(r"ab{2,10}", s))

分组(re.search().group())

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
import re  # regular expressions for text matching

# match.group() returns the whole match; group(i) returns capture group i.
string="ID: 021523, Date: Feb/12/2017"
match=re.search(r"(\d+), Date:(.+)",string) # (\d+): one or more digits; (.+): anything except \n
print(match.group())  # 021523, Date: Feb/12/2017   (fixed: the comments said 021526)
print(match.group(1)) # 021523
print(match.group(2)) # ' Feb/12/2017' — note the leading space: the pattern has none after 'Date:'
print()

# (?P<name>...) gives a capture group a name.
string= "ID: 021523, Date: Feb/12/2017"
match=re.search(r"(?P<id>\d+), Date:(?P<date>.+)",string)
print(match.group('id'))   # 021523
print(match.group('date')) # ' Feb/12/2017'

findall(全部)和or(|)

1
2
3
4
5
6
import re  # regular expressions for text matching

# findall returns every non-overlapping match;
# '|' is alternation: either the left or the right branch matches.
print(re.findall(r"r[uae]n", "run ran ren"))    # ['run', 'ran', 'ren']
print(re.findall(r"(run|ran)", "run ran ren"))  # ['run', 'ran']

replace(re.sub())

1
2
3
4
5
import re  # regular expressions for text matching

# re.sub replaces every match of the pattern (here runs/rans) with the replacement.
replaced = re.sub(r"r[au]ns", "catches", "dog runs to cat")
print(replaced)  # 'dog catches to cat'

split(re.split())

1
2
3
4
5
import re  # regular expressions for text matching

# re.split cuts the string at every match of the character class.
string="a;b,c.d;]we"
# fixed comment: the separators are ',', ';' and '.' — '\.' is an escaped dot, not a backslash
print(re.split(r"[,;\.]",string))  # ['a', 'b', 'c', 'd', ']we']

compile(将匹配的规则重复使用)

1
2
3
4
5
6
import re  # regular expressions for text matching

# re.compile builds a reusable pattern object, handy when the same
# pattern is applied many times.
compiled_re = re.compile(r"r[ua]n")  # matches run or ran
found = compiled_re.search("dog ran to cat")
print(found)  # first hit: 'ran'

小抄

小练习-爬百度百科

步骤和要求

1
2
3
4
5
6
1.设定基础url路径
2.设定his存放/item/页面
3.设置url路径
4.读取url路径
5.将html设置到soup内部
6.输出相关的标题或者其他内容

爬取一个页面

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
from urllib.request import urlopen  # stdlib HTTP fetch
import re  # regular expressions for text matching
from bs4 import BeautifulSoup  # HTML parser from bs4
import random

# Observed link pattern on the page:
#<a target="_blank" href="/item/%E8%9C%98%E8%9B%9B/8135707" data-lemmaid="8135707">蜘蛛</a>
#<a target="_blank" href="/item/%E8%A0%95%E8%99%AB">蠕虫</a>
#<a target="_blank" href="/item/%E9%80%9A%E7%94%A8%E6%90%9C%E7%B4%A2%E5%BC%95%E6%93%8E">通用搜索引擎</a>

# site root
base_url="https://baike.baidu.com"
# history of visited /item/... paths; the last entry is the current page
his=["/item/%E7%BD%91%E7%BB%9C%E7%88%AC%E8%99%AB/5162711"]

# full URL = site root + most recent history entry
url=base_url+his[-1] # i.e. https://baike.baidu.com/item/%E7%BD%91%E7%BB%9C%E7%88%AC%E8%99%AB/5162711
# fetch the page and parse it with BeautifulSoup
html=urlopen(url).read().decode('utf-8')
soup=BeautifulSoup(html,features='lxml')
print(soup.find('h1').get_text()) # the page's <h1> title
print('url:',his[-1])

for循环爬取

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34

from urllib.request import urlopen  # stdlib HTTP fetch
import re  # regular expressions
from bs4 import BeautifulSoup  # HTML parser from bs4
import random

# Observed link pattern on the page:
#<a target="_blank" href="/item/%E8%9C%98%E8%9B%9B/8135707" data-lemmaid="8135707">蜘蛛</a>
#<a target="_blank" href="/item/%E8%A0%95%E8%99%AB">蠕虫</a>
#<a target="_blank" href="/item/%E9%80%9A%E7%94%A8%E6%90%9C%E7%B4%A2%E5%BC%95%E6%93%8E">通用搜索引擎</a>

base_url = "https://baike.baidu.com"  # site root
his = ["/item/%E7%BD%91%E7%BB%9C%E7%88%AC%E8%99%AB/5162711"]  # history of visited /item/ paths

# fixed: the entire crawl body had lost its indentation inside the loop
for i in range(5):
    url = base_url + his[-1]  # next page to visit
    html = urlopen(url).read().decode('utf-8')
    soup = BeautifulSoup(html, features='lxml')
    print(soup.find('h1').get_text())  # page title
    print('url:', his[-1])  # path of the page just visited

    # candidate links: <a target="_blank" href="/item/%xx%xx...">
    sub_urls = soup.find_all("a", {
        "target": "_blank",
        "href": re.compile("/item/(%.{2})+$")})
    if len(sub_urls) != 0:
        his.append(random.sample(sub_urls, 1)[0]['href'])  # follow one random link
    else:
        his.pop()  # dead end: back up one page
print(his)

Requests

get和post区别

1
2
1.get:从服务器取得数据(被动),参数附加在 URL 上
2.post:向服务器发送数据(主动),可以控制服务器返回的内容,实现个性化服务

request get请求

1
2
3
4
5
6
import requests
import webbrowser
param={"wd":"莫烦Python"} # query string: wd=莫烦Python
r=requests.get('http://www.baidu.com/s',params=param)
print(r.url)
webbrowser.open(r.url) # open the resulting URL in the default browser

request post请求

1
2
3
4
5
import requests
import webbrowser
# form fields submitted in the POST body
data={'firstname':'莫烦','lastname':'周'}
r=requests.post('https://pythonscraping.com/pages/files/processing.php',data=data)
print(r.text)

上传照片

1
2
3
4
5
import requests
import webbrowser
# file upload: form field name -> open file handle in binary mode
# NOTE(review): the handle is never closed; a `with` block would be safer
file = {'uploadFile': open('./1.jpg','rb')}
r=requests.post('http://pythonscraping.com/files/processing2.php',files=file)
print(r.text)

登录

1
2
3
4
5
6
7
8
9
10
11
12
13
14
1.使用 post 方法登录了第一个红框的 url
2.post 的时候, 使用了 Form data 中的用户名和密码
3.生成了一些cookies

import requests
import webbrowser
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
payload={'username':'Lark','password':'password'} # login credentials
r=requests.post('http://pythonscraping.com/pages/cookies/welcome.php',data=payload) # submit the credentials via POST
print(r.cookies.get_dict()) # session cookies issued by the server

r=requests.get('http://pythonscraping.com/pages/cookies/profile.php',cookies=r.cookies,verify=False) # resend the cookies so the GET is treated as logged in
print(r.text)

使用Session登录

1
2
3
4
5
6
7
8
9
10
11
12
import requests
import webbrowser
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)

# A Session object keeps cookies across requests automatically.
session=requests.Session()
payload={'username':'Lark','password':'password'} # login credentials
# fixed: post through the session — the original called requests.post/requests.get,
# so the Session it created was never used and held no cookies
r=session.post('http://pythonscraping.com/pages/cookies/welcome.php',data=payload)
print(r.cookies.get_dict()) # cookies issued at login

# the session resends its stored cookies by itself; no cookies= argument needed
r=session.get('http://pythonscraping.com/pages/cookies/profile.php',verify=False)
print(r.text)

下载文件(urllib.urlretrieve)

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
import requests
import webbrowser
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
import os
from urllib.request import urlretrieve

os.makedirs('./img/', exist_ok=True)  # target folder for the downloads
IMAGE_URL = "https://static.mofanpy.com/static/img/description/learning_step_flowchart.png"  # image address

# Option 1: urllib's one-shot download helper.
urlretrieve(IMAGE_URL, './img/image1.png')

# Option 2: requests with a chunked streaming write.
# fixed: the with/for bodies had lost their indentation (IndentationError as pasted)
r = requests.get(IMAGE_URL)
with open('./img/image2.png', 'wb') as f:
    for chunk in r.iter_content(chunk_size=32):  # write the payload in small chunks
        f.write(chunk)

小练习-下载美图

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
import requests
import webbrowser
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
import os
from urllib.request import urlretrieve
from bs4 import BeautifulSoup

# listing page to scrape
URL = "http://www.nationalgeographic.com.cn/animals/"
# locate every <ul class="img_list"> block
html = requests.get(URL).text
soup = BeautifulSoup(html, 'lxml')
img_ul = soup.find_all('ul', {"class": "img_list"})

os.makedirs('./img/', exist_ok=True)  # fixed: ensure the target folder exists before writing
# fixed: the original had no inner `for img in imgs:` loop (so `img` was an undefined
# name) and the loop bodies had lost their indentation
for ul in img_ul:
    imgs = ul.find_all('img')  # every <img> inside this list
    for img in imgs:
        url = img['src']  # the image's address
        r = requests.get(url, stream=True)
        image_name = url.split('/')[-1]
        with open('./img/%s' % image_name, 'wb') as f:
            for chunk in r.iter_content(chunk_size=128):
                f.write(chunk)
        print('Saved %s' % image_name)

加速爬虫(多进程分布式)

分布式爬虫(multiprocessing)

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
import multiprocessing as mp
import time
from urllib.request import urlopen, urljoin
from bs4 import BeautifulSoup
import re

# root of the site being crawled
base_url = 'https://mofanpy.com/'

# fixed: both function bodies had lost their indentation (IndentationError as pasted)
def crawl(url):
    """Download one page and return its decoded HTML."""
    response = urlopen(url)
    time.sleep(0.1)  # slight delay between downloads
    return response.read().decode()

def parse(html):
    """Extract (title, internal page urls, canonical url) from one page's HTML."""
    soup = BeautifulSoup(html, 'lxml')
    urls = soup.find_all('a', {"href": re.compile('^/.+?/$')})  # site-internal links like /path/
    title = soup.find('h1').get_text().strip()
    page_urls = set([urljoin(base_url, url['href']) for url in urls])  # absolute + de-duplicated
    url = soup.find('meta', {'property': "og:url"})['content']  # canonical URL of the page
    return title, page_urls, url

加速爬虫(异步加载Asyncio)

Asyncio库

1
2
3
1.Python的原装库
2.Python3.5之后
3.Python3.5:async和await协同工作

普通代码执行

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
import time

# Sequential execution: job(1) must finish before job(2) starts,
# so the total wall time is roughly 1 + 2 = 3 seconds.

# fixed: the function bodies had lost their indentation (IndentationError as pasted)
def job(t):
    """Simulate a task that takes t seconds."""
    print('Start job:', t)
    time.sleep(t)  # wait for "t" seconds
    print('Job', t, 'takes:', t, 's')

def main():
    """Run job(1) and job(2) back to back."""
    [job(t) for t in range(1, 3)]

t1 = time.time()
main()
print("NO async total time:", time.time() - t1)

async版代码执行

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
import asyncio
import time

# Async version: both jobs sleep concurrently, so the total wall time is
# max(1, 2) ≈ 2 seconds instead of 3.

# fixed: the coroutine bodies had lost their indentation (IndentationError as pasted)
async def job(t):
    """Simulate an async task that takes t seconds without blocking the loop."""
    print('Start job:', t)
    await asyncio.sleep(t)  # non-blocking wait; control returns to the event loop
    print('Job', t, 'takes:', t, 's')

async def main(loop):
    """Schedule job(1) and job(2) as tasks and wait for both."""
    tasks = [
        loop.create_task(job(t)) for t in range(1, 3)
    ]  # create the tasks without running them yet
    await asyncio.wait(tasks)  # run them and wait for completion


t1 = time.time()
loop = asyncio.get_event_loop()  # build the event loop
# NOTE(review): get_event_loop at module level is deprecated since Python 3.10;
# asyncio.run(main(...)) is the modern entry point
loop.run_until_complete(main(loop))  # drive the loop until main finishes
loop.close()  # shut the loop down
print("Async total time:", time.time() - t1)

aiohttp

aiohttp介绍:

1
2
1.aiohttp:可以将requests替换成aiohttp(换成异步requests)
2.aiohttp官网:https://docs.aiohttp.org/en/stable/index.html

一般的requests模块:

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
import time
import requests

# page fetched repeatedly
URL = 'https://mofanpy.com/'

# fixed: the function body and loop had lost their indentation (IndentationError as pasted)
def normal():
    """Fetch URL five times sequentially, printing each final url."""
    for i in range(5):
        r = requests.get(URL)  # blocking download
        url = r.url
        print(url)

t1 = time.time()
normal()
print("普通的全部时间:", time.time() - t1)

aiohttp模块:

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
import asyncio  # fixed: was missing although asyncio.wait / get_event_loop are used below
import time
import requests
import aiohttp

URL = 'https://mofanpy.com/'  # fixed: URL was referenced but never defined in this snippet

# fixed: the coroutine bodies had lost their indentation (IndentationError as pasted)
async def job(session):
    """Download URL through the shared session and return the final url."""
    response = await session.get(URL)  # await the download, yielding to other tasks
    return str(response.url)

async def main(loop):
    """Fan out two concurrent downloads and print their results."""
    async with aiohttp.ClientSession() as session:  # recommended way to create a session
        tasks = [loop.create_task(job(session)) for _ in range(2)]
        finished, unfinished = await asyncio.wait(tasks)
        all_results = [r.result() for r in finished]  # collect every result
        print(all_results)

t1 = time.time()
loop = asyncio.get_event_loop()  # build the event loop
loop.run_until_complete(main(loop))  # run it
loop.close()  # shut it down
print("Async total time:", time.time() - t1)

Selenium

概念和安装

1
2
3
4
1.概念:它能够控制你的浏览器,有模有样地学人类"看"网页
2.安装:
2.1 pip3 install selenium
2.2 分为linux和macos/windows区别

Firefox浏览器插件(Katalon Recorder)

1
2
3
1.下载:https://addons.mozilla.org/en-US/firefox/addon/katalon-automation-record/
2.打开插件点击record进行网页操作
3.打开插件点击Export进行浏览代码

Python控制浏览器

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
from selenium import webdriver

# Launch a Chrome browser under Selenium's control.
# A WebDriverException mentioning 'chromedriver' means the chromedriver executable
# matching the installed Chrome version must be downloaded and placed at this path.
driver=webdriver.Chrome(executable_path=r"D:\python\PyCharm 2021.1.1\plugins\python\helpers\typeshed\scripts\chromedriver.exe")

# Replay the click sequence recorded with the Katalon Recorder browser plugin.
# NOTE(review): the find_element_by_* helpers were removed in Selenium 4;
# current code uses driver.find_element(By.XPATH, ...) etc.
driver.get("https://mofanpy.com/")
driver.find_element_by_xpath(u"//img[@alt='强化学习 (Reinforcement Learning)']").click()
driver.find_element_by_link_text("About").click()
driver.find_element_by_link_text(u"赞助").click()
driver.find_element_by_link_text(u"数据处理 ▾").click()
driver.find_element_by_link_text(u"网页爬虫").click()

# Capture the final page's HTML and a screenshot, then close the browser window.
html=driver.page_source
driver.get_screenshot_as_file("./img/screenshot1.png")
driver.close()

Scrapy爬虫库

1
2


,