| code *(string, lengths 42–43.2k)* | apis *(list)* | extract_api *(string, lengths 115–61.9k)* |
|---|---|---|
from absl import app, flags, logging
from absl.flags import FLAGS
import os
import tensorflow as tf
from modules.models import RRDB_Model, RRDB_Model_16x, RFB_Model_16x
from modules.lr_scheduler import MultiStepLR
from modules.losses import PixelLoss, PixelLossDown
from modules.utils import (load_yaml, load_dataset, l... | [
"evaluate.evaluate_dataset"
] | [((435, 509), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""cfg_path"""', '"""./configs/psnr.yaml"""', '"""config file path"""'], {}), "('cfg_path', './configs/psnr.yaml', 'config file path')\n", (454, 509), False, 'from absl import app, flags, logging\n'), ((510, 561), 'absl.flags.DEFINE_string', 'flags.DEF... |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Distributed training using Pytorch boilerplate.
"""
import os
import logging
import random
import argparse
import warnings
import numpy as np
import torch
import torch.nn as nn
from torch.nn.parallel import DistributedDataParallel
import resnet
from train import t... | [
"evaluate.evaluate"
] | [((444, 477), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (467, 477), False, 'import warnings\n'), ((488, 513), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (511, 513), False, 'import argparse\n'), ((3973, 3990), 'random.seed', 'random.seed', ... |
import os
import time
import math
import argparse
import torch
import torch.nn as nn
from torch import optim
import matplotlib.pyplot as plt
from lazy_dataset import LazyDataset
from lstm import Seq2Seq
from train import train_model
from evaluate import evaluate_model
from utils import *
from bucket_sampler import B... | [
"evaluate.evaluate_model"
] | [((384, 400), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(111)'], {}), '(111)\n', (395, 400), True, 'import matplotlib.pyplot as plt\n'), ((627, 659), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""batch"""'], {'fontsize': '(16)'}), "('batch', fontsize=16)\n", (637, 659), True, 'import matplotlib.pyplot as plt\n')... |
import torch
from tqdm import tqdm, trange
from evaluate import evaluate
from sklearn.metrics import confusion_matrix, plot_confusion_matrix
import pandas as pd
import seaborn as sn
import numpy as np
import matplotlib.pyplot as plt
def test(model, criterion, dataloader, device, run):
run['test-config/device'] = d... | [
"evaluate.evaluate"
] | [((388, 473), 'evaluate.evaluate', 'evaluate', ([], {'model': 'model', 'criterion': 'criterion', 'dataloader': 'dataloader', 'device': 'device'}), '(model=model, criterion=criterion, dataloader=dataloader, device=device\n )\n', (396, 473), False, 'from evaluate import evaluate\n'), ((1511, 1561), 'sklearn.metrics.co... |
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 16 17:57:09 2015
@author: Paco
"""
from utils import Utils
from evaluate import Evaluate
from metrics import Metrics
from gradient import Gradient
import numpy as np
# Load data
u = Utils()
train_facile = u.load_matrix('data/data_train_facile.mat')
#generate pairs
pair... | [
"evaluate.Evaluate"
] | [((232, 239), 'utils.Utils', 'Utils', ([], {}), '()\n', (237, 239), False, 'from utils import Utils\n'), ((524, 534), 'gradient.Gradient', 'Gradient', ([], {}), '()\n', (532, 534), False, 'from gradient import Gradient\n'), ((659, 668), 'metrics.Metrics', 'Metrics', ([], {}), '()\n', (666, 668), False, 'from metrics im... |
# coding: utf-8
# In[1]:
import logging
import os
import numpy as np
import torch
import torch.optim as optim
from tqdm import trange
from tqdm import tqdm_notebook as tqdm
import utils
import model.net as net
from model.data_loader import DataLoader
from evaluate import evaluate, f_score_simple
# In[2]:
# da... | [
"evaluate.evaluate",
"evaluate.f_score_simple"
] | [((476, 514), 'os.path.join', 'os.path.join', (['model_dir', '"""params.json"""'], {}), "(model_dir, 'params.json')\n", (488, 514), False, 'import os\n'), ((524, 547), 'utils.Params', 'utils.Params', (['json_path'], {}), '(json_path)\n', (536, 547), False, 'import utils\n'), ((598, 623), 'torch.cuda.is_available', 'tor... |
#!/usr/bin/env python3
"""Main script to run things"""
from data_utils import read_nmt_data, get_minibatch, read_config, hyperparam_string
from model import Seq2Seq, Seq2SeqAttention, Seq2SeqFastAttention
from criterions.matrixBLEU import mBLEU
from utils import onehot_initialization
from evaluate import evaluate_model... | [
"evaluate.evaluate_model"
] | [((535, 560), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (558, 560), False, 'import argparse\n'), ((717, 746), 'data_utils.read_config', 'read_config', (['config_file_path'], {}), '(config_file_path)\n', (728, 746), False, 'from data_utils import read_nmt_data, get_minibatch, read_config, h... |
import time
import datetime
import pytz
import argparse
import numpy as np
import torch
import matplotlib.pyplot as plt
import torch.nn as nn
import torch.nn.functional as F
import os
from PIL import Image
import torchvision.utils as vutils
from model.net import MonocularDepthModel
from model.loss import LossNetwo... | [
"evaluate.evaluate",
"evaluate.infer_depth"
] | [((4688, 4761), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Training of depth estimation model"""'}), "(description='Training of depth estimation model')\n", (4711, 4761), False, 'import argparse\n'), ((614, 636), 'model.dataloader.DataLoaders', 'DataLoaders', (['data_path'], {}), '(d... |
'''
@author Waldinsamkeit
@email <EMAIL>
@create date 2020-10-20 15:58:45
@desc [description]
'''
from pathlib import Path
import os
from sklearn import metrics
from dataloader import DataSet
from datetime import datetime
import torch.nn as nn
import torch
import numpy as np
from log import logger
from typing import... | [
"evaluate.EvalUnit",
"evaluate.cluster_metrics_eval"
] | [((5616, 5650), 'evaluate.EvalUnit', 'EvalUnit', (['(0)', '(0)', '(0)', '(0)', '"""Evaluation"""'], {}), "(0, 0, 0, 0, 'Evaluation')\n", (5624, 5650), False, 'from evaluate import EvalUnit, binary_confusion_matrix_evaluate, cluster_metrics_eval\n'), ((6428, 6465), 'log.logger.info', 'logger.info', (['"""Validation Eval... |
import pyarrow
import argparse
import os
from typing import Any, Dict, List, Optional, Union
#import soundfile as sf
#import librosa
import torch
#from transformers import Wav2Vec2CTCTokenizer
#from transformers import Wav2Vec2FeatureExtractor
from transformers import Wav2Vec2Processor
from transformers import Wav2Ve... | [
"evaluate.Evaluator"
] | [((581, 630), 'sys.path.append', 'sys.path.append', (['"""/home/prsull/scratch/l2asr/src"""'], {}), "('/home/prsull/scratch/l2asr/src')\n", (596, 630), False, 'import sys\n'), ((783, 797), 'evaluate.Evaluator', 'ev.Evaluator', ([], {}), '()\n', (795, 797), True, 'import evaluate as ev\n'), ((1122, 1177), 'argparse.Argu... |
"""Train and evaluate the model"""
import argparse
import logging
import os
import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim.lr_scheduler import LambdaLR
from tqdm import trange
import tools.utils as utils
import model.net as net
from tools.data_loader import DataLoader
from evaluate im... | [
"evaluate.evaluate"
] | [((345, 370), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (368, 370), False, 'import argparse\n'), ((1222, 1244), 'tools.utils.RunningAverage', 'utils.RunningAverage', ([], {}), '()\n', (1242, 1244), True, 'import tools.utils as utils\n'), ((1290, 1307), 'tqdm.trange', 'trange', (['steps_num... |
from flask import Flask, render_template, request, abort
from utils.downloader import download_img,download_thumbnail
import sys
from evaluate import evaluate, mobilenet, nasnet
import os
import json
app = Flask(__name__)
fn = "temp/flask_test.jpg"
model = mobilenet()
model._make_predict_function()
#model2 = nasnet()
... | [
"evaluate.evaluate",
"evaluate.mobilenet"
] | [((207, 222), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (212, 222), False, 'from flask import Flask, render_template, request, abort\n'), ((258, 269), 'evaluate.mobilenet', 'mobilenet', ([], {}), '()\n', (267, 269), False, 'from evaluate import evaluate, mobilenet, nasnet\n'), ((446, 475), 'flask.rend... |
import argparse
import itertools
import os.path
import time, timeit
import sys
import dynet as dy
import numpy as np
import evaluate
import parse
import trees
import vocabulary
import gc
from collections import defaultdict
def format_elapsed(start_time):
elapsed_time = int(time.time() - start_time)
minutes, ... | [
"evaluate.evalb"
] | [((787, 815), 'sys.setrecursionlimit', 'sys.setrecursionlimit', (['(10000)'], {}), '(10000)\n', (808, 815), False, 'import sys\n'), ((909, 942), 'trees.load_trees', 'trees.load_trees', (['args.train_path'], {}), '(args.train_path)\n', (925, 942), False, 'import trees\n'), ((1106, 1137), 'trees.load_trees', 'trees.load_... |
"""Train the models"""
import logging
import os
import mlflow
import torch
from torch.autograd import Variable
from tqdm import tqdm
from datasets import create_data_loaders
from evaluate import evaluate
from evaluators import collect_metrics, collect_losses
from models import create_model
from models.net import col... | [
"evaluate.evaluate"
] | [((4720, 4735), 'seg_utils.visualizer.Visualizer', 'Visualizer', (['opt'], {}), '(opt)\n', (4730, 4735), False, 'from seg_utils.visualizer import Visualizer, get_visuals\n'), ((9369, 9390), 'torch.manual_seed', 'torch.manual_seed', (['(21)'], {}), '(21)\n', (9386, 9390), False, 'import torch\n'), ((9614, 9653), 'loggin... |
"""
Test the detector's accuracy.
"""
import torch
import json
import time
import os, cv2
import tqdm
import numpy as np
from torchvision.transforms import transforms as cvtransforms
from torch.utils.data.dataloader import DataLoader
from lib.models.model_factory import create_model, load_model
from lib.datasets.jde import OriginDetDatase... | [
"evaluate.utils.cache_annotations",
"evaluate.utils.get_annotations_cache",
"evaluate.voc.voc_ap"
] | [((3227, 3239), 'json.load', 'json.load', (['f'], {}), '(f)\n', (3236, 3239), False, 'import json\n'), ((3528, 3607), 'lib.datasets.jde.OriginDetDataset', 'OriginDetDataset', (['dataset_root', 'test_path'], {'augment': '(False)', 'transforms': 'transforms'}), '(dataset_root, test_path, augment=False, transforms=transfo... |
import sys
sys.path.append('./')
import os
import pickle
import gc
import faiss
import numpy as np
from transformers import HfArgumentParser, AutoConfig, DPRContextEncoder, DPRQuestionEncoder
from torch.optim import AdamW
from LibVQ.dataset.dataset import load_rel, write_rel
from LibVQ.learnable_index import Learnabl... | [
"evaluate.load_test_data",
"evaluate.validate"
] | [((11, 32), 'sys.path.append', 'sys.path.append', (['"""./"""'], {}), "('./')\n", (26, 32), False, 'import sys\n'), ((595, 624), 'faiss.omp_set_num_threads', 'faiss.omp_set_num_threads', (['(32)'], {}), '(32)\n', (620, 624), False, 'import faiss\n'), ((657, 671), 'LibVQ.utils.setuplogging', 'setuplogging', ([], {}), '(... |
import visual_visdom
import evaluate
#########################################################
## Callback-functions for evaluating model-performance ##
#########################################################
def _sample_cb(log, config, visdom=None, test_datasets=None, sample_size=64, iters_per_task=None):
'''... | [
"evaluate.metric_statistics",
"evaluate.precision"
] | [((2323, 2578), 'evaluate.precision', 'evaluate.precision', (['classifier', 'test_datasets', 'task', 'iteration'], {'classes_per_task': 'classes_per_task', 'scenario': 'scenario', 'test_size': 'test_size', 'visdom': 'visdom', 'summary_graph': 'summary_graph', 'with_exemplars': 'with_exemplars', 'otr_exemplars': 'otr_ex... |
import argparse
import itertools
import logging
import os
import time
from types import SimpleNamespace
import falcon
import pandas
import torch
from falcon_cors import CORS
import waitress
import numpy as np
import json
import re
from torch.utils.data import DataLoader
from data import Data
from evaluate import eval... | [
"evaluate.handy_tool",
"evaluate.evaluate"
] | [((581, 657), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO', 'format': '"""%(asctime)-18s %(message)s"""'}), "(level=logging.INFO, format='%(asctime)-18s %(message)s')\n", (600, 657), False, 'import logging\n'), ((667, 686), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (684,... |
import torch
import torch.nn.functional as F
from settings import config
import numpy
import evaluate
from collections import defaultdict
import time
class Model(torch.nn.Module):
def __init__(self, data):
super(Model, self).__init__()
# Performance score
self.score = 0
self.best_score = 0
# Filename
se... | [
"evaluate.image_to_text",
"evaluate.text_to_image"
] | [((477, 532), 'torch.nn.Embedding', 'torch.nn.Embedding', (['num_words', "config['word_dimension']"], {}), "(num_words, config['word_dimension'])\n", (495, 532), False, 'import torch\n'), ((547, 616), 'torch.nn.LSTM', 'torch.nn.LSTM', (["config['word_dimension']", "config['model_dimension']", '(1)'], {}), "(config['wor... |
import _init_path
from pgd import PGD
from utils import save_kitti_format, transform_matrix, create_dataloader, create_logger
from evaluate import evaluate
from functools import reduce
from pyquaternion import Quaternion
import tqdm
import re
from datetime import datetime
import lib.utils.iou3d.iou3d_utils as iou3d_uti... | [
"evaluate.evaluate"
] | [((859, 879), 'numpy.random.seed', 'np.random.seed', (['(1024)'], {}), '(1024)\n', (873, 879), True, 'import numpy as np\n'), ((934, 959), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (957, 959), False, 'import argparse\n'), ((6129, 6153), 'os.path.isfile', 'os.path.isfile', (['filename'], {}... |
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by appli... | [
"evaluate.evaluate"
] | [((1163, 1196), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (1186, 1196), False, 'import warnings\n'), ((1223, 1255), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (['__doc__'], {}), '(__doc__)\n', (1246, 1255), False, 'import argparse\n'), ((3662, 3692), 'paddl... |
import argparse
import torch
from models import ConvNet_sem_seg
import load_data
import evaluate
DEVICE = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
def add_args(parser):
# general options
parser.add_argument("--epochs", type=int, default=20)
parser.add_argument("--batch_size", typ... | [
"evaluate.get_metrics"
] | [((1311, 1480), 'models.ConvNet_sem_seg', 'ConvNet_sem_seg', ([], {'dim_in': '(2 * args.bandwidth)', 'f_in': '(1)', 'fs': 'args.feature_numbers', 'f_out': '(11)', 'k_sizes': 'args.kernel_sizes', 'strides': 'args.strides', 'use_skips': 'args.use_skips'}), '(dim_in=2 * args.bandwidth, f_in=1, fs=args.feature_numbers,\n ... |
"""Train the model"""
import argparse
import logging
import os
import os.path
import pickle
import pandas as pd
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable
from tqdm import tqdm
import utils
import model.net as net
import model.data_loader as da... | [
"evaluate.evaluate"
] | [((370, 395), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (393, 395), False, 'import argparse\n'), ((1680, 1702), 'utils.RunningAverage', 'utils.RunningAverage', ([], {}), '()\n', (1700, 1702), False, 'import utils\n'), ((3567, 3617), 'logging.info', 'logging.info', (["('- Train metrics: ' +... |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
last mod 7/2/18
what are last two features in MonoGRnet output?
usage for new detector:
first disable metrics check
min_sensor_prob to <<0
use simple scoreToProb
use the plots to figure out a good scoreToProb function
then you can run metrics check
curre... | [
"evaluate.MetricAvgPrec",
"evaluate.soMetricIoU"
] | [((903, 992), 'numpy.where', 'np.where', (['(score < 0.7)', '(score * 0.2 / 0.7 + 0.05)', '(score * 0.75 / 0.3 + 1 - 0.75 / 0.3)'], {}), '(score < 0.7, score * 0.2 / 0.7 + 0.05, score * 0.75 / 0.3 + 1 - \n 0.75 / 0.3)\n', (911, 992), True, 'import numpy as np\n'), ((2254, 2269), 'evaluate.MetricAvgPrec', 'MetricAvgP... |
import torch
import torch.nn as nn
from torch.utils.tensorboard import SummaryWriter
import argparse
import os
import json
from models.StyleSpeech import StyleSpeech
from dataloader import prepare_dataloader
from optimizer import ScheduledOptim
from evaluate import evaluate
import utils
def load_checkpoint... | [
"evaluate.evaluate"
] | [((369, 400), 'os.path.isfile', 'os.path.isfile', (['checkpoint_path'], {}), '(checkpoint_path)\n', (383, 400), False, 'import os\n'), ((498, 525), 'torch.load', 'torch.load', (['checkpoint_path'], {}), '(checkpoint_path)\n', (508, 525), False, 'import torch\n'), ((1030, 1056), 'utils.get_param_num', 'utils.get_param_n... |
import os
import logging
import time
import random
import re
import json
from copy import deepcopy
import numpy as np
import torch
from torch.optim import Adam
from tqdm import tqdm
from transformers import BertTokenizer
from dora import DORA
from config import Config
from reader import Reader
import ontology
from db... | [
"evaluate.MultiWozEvaluator"
] | [((411, 436), 'logging.getLogger', 'logging.getLogger', (['"""DORA"""'], {}), "('DORA')\n", (428, 436), False, 'import logging\n'), ((492, 515), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (513, 515), False, 'import logging\n'), ((563, 604), 'torch.cuda.set_device', 'torch.cuda.set_device', (['c... |
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
import transformers
from transformers import AutoModel, BertTokenizerFast
import matplotlib.pyplot as plt
from underthesea import word_tokenize... | [
"evaluate.evaluate"
] | [((350, 370), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (362, 370), False, 'import torch\n'), ((1579, 1614), 'numpy.concatenate', 'np.concatenate', (['total_preds'], {'axis': '(0)'}), '(total_preds, axis=0)\n', (1593, 1614), True, 'import numpy as np\n'), ((1785, 1834), 'pandas.read_excel', 'p... |
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
import numpy as np
import argparse
import os
import time
from fastspeech2 import FastSpeech2
from loss import FastSpeech2Loss
from dataset import Dataset
from optimizer import Sche... | [
"evaluate.evaluate"
] | [((445, 465), 'torch.manual_seed', 'torch.manual_seed', (['(0)'], {}), '(0)\n', (462, 465), False, 'import torch\n'), ((599, 619), 'dataset.Dataset', 'Dataset', (['"""train.txt"""'], {}), "('train.txt')\n", (606, 619), False, 'from dataset import Dataset\n'), ((635, 766), 'torch.utils.data.DataLoader', 'DataLoader', ([... |
#pylint: disable = redefined-outer-name, invalid-name
# inbuilt lib imports:
from typing import List, Dict, Union
import os
import argparse
import random
import json
# external lib imports:
from tqdm import tqdm
import numpy as np
import tensorflow as tf
from tensorflow.keras import models, optimizers
# project impor... | [
"evaluate.evaluate"
] | [((1252, 1297), 'lib.data.generate_batches', 'generate_batches', (['train_instances', 'batch_size'], {}), '(train_instances, batch_size)\n', (1268, 1297), False, 'from lib.data import read_conll_data, generate_batches, load_embeddings, generate_training_instances\n'), ((2767, 2836), 'argparse.ArgumentParser', 'argparse... |
"""
* Copyright (c) 2021, salesforce.com, inc.
* All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
* For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""
import random
import json
import argparse
from loguru import logger
import params
from e... | [
"evaluate.clustering_report_gt"
] | [((378, 400), 'random.seed', 'random.seed', (['args.seed'], {}), '(args.seed)\n', (389, 400), False, 'import random\n'), ((1230, 1255), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1253, 1255), False, 'import argparse\n'), ((517, 529), 'json.load', 'json.load', (['f'], {}), '(f)\n', (526, 52... |
import numpy as np
from app import main, init_activity_detection, init_classificator, init_feature_extraction, init_pre_processing
from models import Waveform
from utils import load_groundtruth, read_csv, plot_metrics_classification_boxplot
from utils import plot_audio, plot_odf, plot_confusion_matrix, plot_evaluation_... | [
"evaluate.evaluate_classificator",
"evaluate.evaluate_activity_detection",
"evaluate.evaluate_system"
] | [((855, 887), 'pandas.read_csv', 'pd.read_csv', (['model_normalization'], {}), '(model_normalization)\n', (866, 887), True, 'import pandas as pd\n'), ((954, 982), 'models.Waveform', 'Waveform', ([], {'path': "(path + '.wav')"}), "(path=path + '.wav')\n", (962, 982), False, 'from models import Waveform\n'), ((997, 1027)... |
from robust_rcf import robust_rcf
import numpy as np
import pandas as pd
from evaluate import evaluate, anomaly_classification_percentile
from sklearn.metrics import accuracy_score
import time
import matplotlib.pyplot as plt
from sklearn.preprocessing import MinMaxScaler
def test_rrcf_simon(data, sample = 0.1):
# ... | [
"evaluate.evaluate",
"evaluate.anomaly_classification_percentile"
] | [((349, 497), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {'columns': "['Timestamp', 'Year', 'Month', 'Day of Month', 'Day of Week', 'Hour',\n 'Minute', 'Seconds', 'Simon Features', 'file']"}), "(data, columns=['Timestamp', 'Year', 'Month', 'Day of Month',\n 'Day of Week', 'Hour', 'Minute', 'Seconds', 'Simon F... |
"""
This runs the final configuration as reported in the paper.
"""
from config import base
import evaluate as e
config = base.get_config()
output_path = 'results/final.output.txt'
print("Running configuration: {}".format(config))
predictions = e.evaluate(config)
test_data = e.load_data(config['test_filepath'])
e.ou... | [
"evaluate.evaluate",
"evaluate.load_data",
"evaluate.output"
] | [((124, 141), 'config.base.get_config', 'base.get_config', ([], {}), '()\n', (139, 141), False, 'from config import base\n'), ((248, 266), 'evaluate.evaluate', 'e.evaluate', (['config'], {}), '(config)\n', (258, 266), True, 'import evaluate as e\n'), ((279, 315), 'evaluate.load_data', 'e.load_data', (["config['test_fil... |
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.distributions.normal import Normal
from torch.utils.data import DataLoader
import numpy as np
from datetime import datetime
import time
from pathlib import Path
import argparse
import math
from collections import d... | [
"evaluate.Evaluator"
] | [((640, 665), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (663, 665), False, 'import torch\n'), ((675, 718), 'torch.device', 'torch.device', (["('cuda' if use_cuda else 'cpu')"], {}), "('cuda' if use_cuda else 'cpu')\n", (687, 718), False, 'import torch\n'), ((6064, 6089), 'argparse.Argument... |
import torch
from torch import nn, optim
from torch.utils.data import DataLoader, sampler
from tqdm import tqdm
from argument import get_args
from backbone import vovnet39, vovnet57, resnet50, resnet101
from utils.dataset import COCODataset, collate_fn
from model import ATSS,Efficientnet_Bifpn_ATSS
from utils import t... | [
"evaluate.evaluate"
] | [((1151, 1166), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1164, 1166), False, 'import torch\n'), ((2741, 2756), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2754, 2756), False, 'import torch\n'), ((797, 820), 'distributed.all_gather', 'all_gather', (['predictions'], {}), '(predictions)\n', (807, 820)... |
import sys
import os
import numpy as np
from shutil import copyfile
from stat import S_IREAD, S_IRGRP, S_IROTH
from network import TasnetWithDprnn
from train import train_network
from separate import Separator
from evaluate import Evaluator
FILE_LIST_DIR = '/data1/ditter/speechSeparation/preprocessedData/create-speak... | [
"evaluate.Evaluator"
] | [((361, 422), 'os.path.join', 'os.path.join', (['FILE_LIST_DIR', "('mix_2_spk_min_' + 'tr' + '_mix')"], {}), "(FILE_LIST_DIR, 'mix_2_spk_min_' + 'tr' + '_mix')\n", (373, 422), False, 'import os\n'), ((446, 507), 'os.path.join', 'os.path.join', (['FILE_LIST_DIR', "('mix_2_spk_min_' + 'cv' + '_mix')"], {}), "(FILE_LIST_D... |
'''
Reference implementation of Learn2Perturb.
Author: <NAME>
For more details, refer to the paper:
Learn2Perturb: an End-to-end Feature Perturbation Learning to Improve Adversarial Robustness
<NAME>, <NAME>, <NAME>, <NAME>, <NAME>
Computer Vision and Pattern Recogniton (CVPR), 2020
'''
import... | [
"evaluate.evaluate"
] | [((609, 688), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Learn2Perturb for adversarial robustness"""'}), "(description='Learn2Perturb for adversarial robustness')\n", (632, 688), False, 'import argparse\n'), ((2691, 2792), 'torchvision.datasets.CIFAR10', 'torchvision.datasets.CIFAR10... |
import os
import sys
import math
import argparse
import numpy as np
from tqdm import tqdm
import torch
from torch.multiprocessing import Queue, Process
sys.path.insert(0, '../lib')
sys.path.insert(0, '../model')
# from data.CrowdHuman import CrowdHuman
from data.CrowdHuman_json import CrowdHuman
from utils import mis... | [
"evaluate.compute_MMR.compute_MMR",
"evaluate.compute_APMR.compute_APMR"
] | [((154, 182), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""../lib"""'], {}), "(0, '../lib')\n", (169, 182), False, 'import sys\n'), ((183, 213), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""../model"""'], {}), "(0, '../model')\n", (198, 213), False, 'import sys\n'), ((604, 662), 'os.path.join', 'os.path.joi... |
"""Test model for SMP-CAIL2020-Argmine.
Author: Tsinghuaboy <EMAIL>
Usage:
python main.py --model_config 'config/bert_config.json' \
--in_file 'data/SMP-CAIL2020-test1.csv' \
--out_file 'bert-submission-test-1.csv'
python main.py --model_config 'config/rnn_config.json' \
... | [
"evaluate.evaluate"
] | [((1399, 1424), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1422, 1424), False, 'import torch\n'), ((2394, 2459), 'torch.utils.data.DataLoader', 'DataLoader', (['test_set'], {'batch_size': 'config.batch_size', 'shuffle': '(False)'}), '(test_set, batch_size=config.batch_size, shuffle=False)\... |
import argparse
import itertools
import os.path
import time
import torch
import torch.optim.lr_scheduler
import numpy as np
import evaluate
import trees
import vocabulary
import nkutil
import parse_nk
tokens = parse_nk
def torch_load(load_path):
if parse_nk.use_cuda:
return torch.load(load_path)
e... | [
"evaluate.evalb"
] | [((827, 1717), 'nkutil.HParams', 'nkutil.HParams', ([], {'max_len_train': '(0)', 'max_len_dev': '(0)', 'sentence_max_len': '(300)', 'learning_rate': '(0.0008)', 'learning_rate_warmup_steps': '(160)', 'clip_grad_norm': '(0.0)', 'step_decay': '(True)', 'step_decay_factor': '(0.5)', 'step_decay_patience': '(5)', 'max_cons... |
"""
Test the detector's accuracy.
"""
import torch
import json
import time
import os, cv2
import tqdm
import numpy as np
from torchvision.transforms import transforms as cvtransforms
from torch.utils.data.dataloader import DataLoader
from lib.models.model_factory import create_model, load_model
from lib.datasets.jde import OriginDetDatase... | [
"evaluate.utils.cache_annotations",
"evaluate.utils.get_annotations_cache"
] | [((3157, 3169), 'json.load', 'json.load', (['f'], {}), '(f)\n', (3166, 3169), False, 'import json\n'), ((3458, 3537), 'lib.datasets.jde.OriginDetDataset', 'OriginDetDataset', (['dataset_root', 'test_path'], {'augment': '(False)', 'transforms': 'transforms'}), '(dataset_root, test_path, augment=False, transforms=transfo... |
import torch.nn as nn
from torch.nn import functional as F
from pykp.masked_loss import masked_cross_entropy
from utils.statistics import LossStatistics
from utils.time_log import time_since, convert_time2str
from evaluate import evaluate_loss
import time
import math
import logging
import torch
import sys
import os
EP... | [
"evaluate.evaluate_loss"
] | [((459, 513), 'torch.nn.functional.binary_cross_entropy', 'F.binary_cross_entropy', (['recon_x', 'x'], {'size_average': '(False)'}), '(recon_x, x, size_average=False)\n', (481, 513), True, 'from torch.nn import functional as F\n'), ((2170, 2265), 'logging.info', 'logging.info', (["('Overall sparsity = %.3f, l1 strength... |
"""
SouthPark Chatbot
"""
import os
import argparse
import torch
import config
from models import MobileHairNet
from trainer import Trainer
from evaluate import evalTest, evaluate, evaluateOne
from dataset import HairDataset, ImgTransformer
from utils import CheckpointManager
DIR_PATH = os.path.dirname(__file__)
US... | [
"evaluate.evaluate",
"evaluate.evalTest",
"evaluate.evaluateOne"
] | [((292, 317), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (307, 317), False, 'import os\n'), ((329, 354), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (352, 354), False, 'import torch\n'), ((364, 407), 'torch.device', 'torch.device', (["('cuda' if USE_CUDA else '... |
from pathlib import Path
import sys
sys.path.append(str(Path().absolute()))
import logging
log_level = "INFO"
logging.basicConfig(
filename=str(snakemake.log),
filemode="w",
level=log_level,
format="[%(asctime)s]:%(levelname)s: %(message)s",
datefmt="%d/%m/%Y %I:%M:%S %p",
)
from evaluate.report imp... | [
"evaluate.report.RecallReport.from_files"
] | [((612, 643), 'logging.info', 'logging.info', (['f"""Loading report"""'], {}), "(f'Loading report')\n", (624, 643), False, 'import logging\n'), ((660, 816), 'evaluate.report.RecallReport.from_files', 'RecallReport.from_files', (['recall_report_files_for_one_sample_and_all_gt_conf_percentiles'], {'concatenate_dfs_one_by... |
import os
import sys
sys.path.insert(1, os.path.join(sys.path[0], '../utils'))
import numpy as np
import argparse
import h5py
import math
import time
import pickle
import logging
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.u... | [
"evaluate.Evaluator"
] | [((40, 77), 'os.path.join', 'os.path.join', (['sys.path[0]', '"""../utils"""'], {}), "(sys.path[0], '../utils')\n", (52, 77), False, 'import os\n'), ((1785, 1809), 'losses.get_loss_func', 'get_loss_func', (['loss_type'], {}), '(loss_type)\n', (1798, 1809), False, 'from losses import get_loss_func\n'), ((2247, 2332), 'o... |
from keras import backend as K
from keras.models import load_model
from sacred import Experiment
from sacred.utils import apply_backspaces_and_linefeeds
from sacred.observers import FileStorageObserver
from keras.callbacks import ModelCheckpoint, EarlyStopping, Callback, TensorBoard
from utils.util import prepare_da... | [
"evaluate.evaluate"
] | [((734, 758), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (756, 758), True, 'import tensorflow as tf\n'), ((880, 910), 'keras.backend.set_image_dim_ordering', 'K.set_image_dim_ordering', (['"""tf"""'], {}), "('tf')\n", (904, 910), True, 'from keras import backend as K\n'), ((1535, 1579... |
# -*- coding: utf-8 -*-
"""
Python File Template
"""
import os
import sys
import argparse
import logging
import numpy as np
import time
import torchtext
from torch.autograd import Variable
from torch.optim import Adam
from torch.utils.data import DataLoader
import config
import utils
import copy
import torch
impor... | [
"evaluate.evaluate_beam_search"
] | [((3646, 3779), 'beam_search.SequenceGenerator', 'SequenceGenerator', (['model'], {'eos_id': 'opt.word2id[pykp.io.EOS_WORD]', 'beam_size': 'opt.beam_size', 'max_sequence_length': 'opt.max_sent_length'}), '(model, eos_id=opt.word2id[pykp.io.EOS_WORD], beam_size=\n opt.beam_size, max_sequence_length=opt.max_sent_lengt... |
import tqdm
import struct
import os
import numpy as np
import pickle
import json
import random
from collections import Counter
#from lightfm import LightFM
from scipy import sparse
from evaluate import evaluate, coverage
from sklearn.preprocessing import LabelBinarizer
from implicit.als import AlternatingLeastSquares
... | [
"evaluate.evaluate"
] | [((1453, 1472), 'numpy.mean', 'np.mean', (['popularity'], {}), '(popularity)\n', (1460, 1472), True, 'import numpy as np\n'), ((2236, 2250), 'numpy.sort', 'np.sort', (['array'], {}), '(array)\n', (2243, 2250), True, 'import numpy as np\n'), ((2286, 2318), 'numpy.arange', 'np.arange', (['(1)', '(array.shape[0] + 1)'], {... |
# -*- coding: utf-8 -*-
from evaluate import strict, loose_macro, loose_micro
def get_true_and_prediction(scores, y_data):
true_and_prediction = []
for score,true_label in zip(scores,y_data):
predicted_tag = []
true_tag = []
for label_id,label_score in enumerate(list(true_label)):
... | [
"evaluate.loose_macro",
"evaluate.loose_micro",
"evaluate.strict"
] | [((892, 919), 'evaluate.strict', 'strict', (['true_and_prediction'], {}), '(true_and_prediction)\n', (898, 919), False, 'from evaluate import strict, loose_macro, loose_micro\n'), ((955, 987), 'evaluate.loose_macro', 'loose_macro', (['true_and_prediction'], {}), '(true_and_prediction)\n', (966, 987), False, 'from evalu... |
import argparse
from evaluate import evaluate_model
from models import parse_class_weights
parser = argparse.ArgumentParser(
description="Provide a list of model files and a setting of class weights to visualize performance")
parser.add_argument('-m', type=str, nargs='+', help="list of paths to the saved model f... | [
"evaluate.evaluate_model"
] | [((102, 236), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Provide a list of model files and a setting of class weights to visualize performance"""'}), "(description=\n 'Provide a list of model files and a setting of class weights to visualize performance'\n )\n", (125, 236), Fal... |
import numpy as np
import os
import torch
from torch import nn
from torch.autograd import Variable
import torch.nn.functional as F
from torch.nn.utils.rnn import pad_sequence
from torch import optim
import random
import json
from collections import Counter
from torch.utils.data import Dataset, DataLoader, TensorDataset... | [
"evaluate.strict",
"evaluate.loose_micro",
"evaluate.loose_macro"
] | [((474, 530), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Code to Inference"""'}), "(description='Code to Inference')\n", (497, 530), False, 'import argparse\n'), ((1874, 1893), 'pickle.load', 'pickle.load', (['infile'], {}), '(infile)\n', (1885, 1893), False, 'import pickle\n'), ((19... |
import os
import pandas as pd
import torch
from scipy.spatial import distance
from contextualized_topic_models.models.ctm import CombinedTM
from contextualized_topic_models.utils.preprocessing import WhiteSpacePreprocessing
from contextualized_topic_models.utils.data_preparation import TopicModelDataPreparation
from co... | [
"evaluate.evaluate_scores",
"evaluate.compute_jsd"
] | [((531, 556), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (554, 556), False, 'import argparse\n'), ((2010, 2080), 'contextualized_topic_models.utils.preprocessing.WhiteSpacePreprocessing', 'WhiteSpacePreprocessing', ([], {'documents': 'documents_raw', 'vocabulary_size': '(5000)'}), '(documen... |
# import dependencies
import pickle
import matplotlib as mpl
mpl.use('TKAgg')
import matplotlib.pyplot as plt
import numpy as np
import torch
import tqdm
import visdom
import evaluate
# define function
def train(model, train_loader, train_size, val_loader, val_size, criterion,
optimizer, scheduler, epochs... | [
"evaluate.evaluate"
] | [((62, 78), 'matplotlib.use', 'mpl.use', (['"""TKAgg"""'], {}), "('TKAgg')\n", (69, 78), True, 'import matplotlib as mpl\n'), ((523, 538), 'visdom.Visdom', 'visdom.Visdom', ([], {}), '()\n', (536, 538), False, 'import visdom\n'), ((748, 770), 'torch.load', 'torch.load', (['checkpoint'], {}), '(checkpoint)\n', (758, 770... |
from typing import Dict, List, Any
import chess
import sys
import time
from evaluate import evaluate_board, move_value, check_end_game
debug_info: Dict[str, Any] = {}
def next_move(depth: int, board: chess.Board, debug=True) -> chess.Move:
"""
What is the next best move?
"""
debug_info.clear()
de... | [
"evaluate.evaluate_board",
"evaluate.move_value",
"evaluate.check_end_game"
] | [((351, 362), 'time.time', 'time.time', ([], {}), '()\n', (360, 362), False, 'import time\n'), ((767, 788), 'evaluate.check_end_game', 'check_end_game', (['board'], {}), '(board)\n', (781, 788), False, 'from evaluate import evaluate_board, move_value, check_end_game\n'), ((428, 439), 'time.time', 'time.time', ([], {}),... |
# ================== Step 1: write files to raw_path
import argparse
# https://www.openslr.org/resources/93/data_aishell3.tgz downloads quickly via Xunlei.
import yaml
from preprocessor import ljspeech, aishell3, libritts
def main(config):
if "LJSpeech" in config["dataset"]:
ljspeech.prepare_align(config)
if "AISHELL3" ... | [
"evaluate.evaluate"
] | [((4122, 4207), 'dataset.Dataset', 'Dataset', (['"""train.txt"""', 'preprocess_config', 'train_config'], {'sort': '(True)', 'drop_last': '(True)'}), "('train.txt', preprocess_config, train_config, sort=True, drop_last=True\n )\n", (4129, 4207), False, 'from dataset import Dataset\n'), ((4411, 4515), 'torch.utils.dat... |
import matplotlib
matplotlib.use('Agg')
import train
import dataset as ds
import tensorflow as tf
from tensorflow.contrib.tensorboard.plugins import projector
from entity_lstm import EntityLSTM
import utils
import os
import conll_to_brat
import glob
import codecs
import shutil
import time
import copy
import evaluate
im... | [
"evaluate.evaluate_model",
"evaluate.save_results"
] | [((18, 39), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (32, 39), False, 'import matplotlib\n'), ((753, 786), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (776, 786), False, 'import warnings\n'), ((1071, 1110), 'utils.get_current_time_in_milisec... |
#run_experiment.py
#Copyright (c) 2020 <NAME> <NAME>
#MIT License
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, mo... | [
"evaluate.clean_up_output_files",
"evaluate.evaluate_all",
"evaluate.plot_roc_curve_multi_class",
"evaluate.save",
"evaluate.plot_learning_curves",
"evaluate.save_final_summary",
"evaluate.plot_pr_curve_multi_class",
"evaluate.initialize_evaluation_dfs"
] | [((1429, 1446), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (1443, 1446), True, 'import numpy as np\n'), ((1447, 1467), 'torch.manual_seed', 'torch.manual_seed', (['(0)'], {}), '(0)\n', (1464, 1467), False, 'import torch, torch.nn as nn, torch.nn.functional as F\n'), ((1468, 1493), 'torch.cuda.manual... |
# -*- coding: utf-8 -*-
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import time
import math
import setting
from evaluate import evaluate
from buildVocab import readVocab
import torch
from ..metrics.metric import metricPair
def getMetirc(ref_str, gen_str):... | [
"evaluate.evaluate"
] | [((49, 63), 'matplotlib.use', 'mpl.use', (['"""Agg"""'], {}), "('Agg')\n", (56, 63), True, 'import matplotlib as mpl\n'), ((388, 400), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (398, 400), True, 'import matplotlib.pyplot as plt\n'), ((415, 429), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), ... |
from __future__ import print_function
import torch
import argparse
import sys, os
import os.path as osp
import h5py
from train_utils import save_model_epoch
from models import DSN
from train_eval import train
from train_utils import Logger, read_json, weights_init
from evaluate import evaluate
parser = argparse.Argum... | [
"evaluate.evaluate"
] | [((306, 419), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (['"""Pytorch code for ultrasound video summarization using reinforcement learning"""'], {}), "(\n 'Pytorch code for ultrasound video summarization using reinforcement learning'\n )\n", (329, 419), False, 'import argparse\n'), ((2938, 2966), 'tor... |
from agents import *
from models import *
import copy
from evaluate import evaluate
import multiprocessing as mp
from itertools import product
#
class Tuner():
def __init__(self, model, algorithm, params_dict, passive=True):
self.safety = []
self.efficiency = []
self.collision_cnt = []
... | [
"evaluate.evaluate"
] | [((1327, 1412), 'evaluate.evaluate', 'evaluate', (['self.model', 'self.algorithm', '(False)', 'robot', 'param_str[:-2]', 'self.passive'], {}), '(self.model, self.algorithm, False, robot, param_str[:-2], self.passive\n )\n', (1335, 1412), False, 'from evaluate import evaluate\n'), ((1811, 1825), 'multiprocessing.cpu_... |
# Code adapted from https://github.com/guoyang9/NCF
import os
import time
import argparse
import numpy as np
import torch
import torch.nn
import torch.optim as optim
import torch.backends.cudnn as cudnn
from torch.utils.tensorboard import SummaryWriter
import model
import evaluate
import data_utils
import adaptdl
i... | [
"evaluate.metrics"
] | [((391, 416), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (414, 416), False, 'import argparse\n'), ((3009, 3033), 'adaptdl.env.share_path', 'adaptdl.env.share_path', ([], {}), '()\n', (3031, 3033), False, 'import adaptdl\n'), ((3271, 3304), 'os.path.join', 'os.path.join', (['main_path', '"""... |
#!/usr/bin/env python3
import argparse
import os
import numpy as np
import time
import torch
from torch import optim
from torch import nn
import visual_plt
import utils
import evaluate
from data import get_multitask_experiment
from encoder import Classifier
from vae_models import AutoEncoder
import callbacks as cb
from... | [
"evaluate.show_reconstruction",
"evaluate.show_samples",
"evaluate.initiate_precision_dict",
"evaluate.validate"
] | [((401, 503), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (['"""./main.py"""'], {'description': '"""Run individual continual learning experiment."""'}), "('./main.py', description=\n 'Run individual continual learning experiment.')\n", (424, 503), False, 'import argparse\n'), ((7178, 7217), 'torch.device',... |
import matplotlib.pyplot as plt
import numpy as np
import matplotlib as mpl
import pandas as pd
import sys
sys.path.append("..")
sys.path.append("../technical-analysis_python/")
mpl.use('tkagg') # issues with Big Sur
# technical analysis
from strategy.macd_crossover import macdCrossover
from backtest import Backtest... | [
"evaluate.MaxDrawdown",
"evaluate.SharpeRatio",
"evaluate.PortfolioReturn",
"evaluate.CAGR"
] | [((108, 129), 'sys.path.append', 'sys.path.append', (['""".."""'], {}), "('..')\n", (123, 129), False, 'import sys\n'), ((130, 178), 'sys.path.append', 'sys.path.append', (['"""../technical-analysis_python/"""'], {}), "('../technical-analysis_python/')\n", (145, 178), False, 'import sys\n'), ((179, 195), 'matplotlib.us... |
import tqdm
import struct
import os
import numpy as np
import pickle
import argparse
from scipy import sparse
from evaluate import evaluate
from implicit.als import AlternatingLeastSquares
os.environ["OPENBLAS_NUM_THREADS"] = "1"
user_features_filename = 'out_user_features_{}.feats'
item_features_filename = 'out_ite... | [
"evaluate.evaluate"
] | [((905, 938), 'numpy.array', 'np.array', (['feats'], {'dtype': 'np.float32'}), '(feats, dtype=np.float32)\n', (913, 938), True, 'import numpy as np\n'), ((1215, 1255), 'os.rename', 'os.rename', (["(out_fname + '.tmp')", 'out_fname'], {}), "(out_fname + '.tmp', out_fname)\n", (1224, 1255), False, 'import os\n'), ((1453,... |
import argparse
import functools
import itertools
import os.path
import os
import time
import torch
import torch.nn.functional as F
import numpy as np
from benepar import char_lstm
from benepar import decode_chart
from benepar import nkutil
from benepar import parse_chart
from benepar import InputSentence
from benep... | [
"evaluate.evalb"
] | [((908, 2400), 'benepar.nkutil.HParams', 'nkutil.HParams', ([], {'back_cycle': '(False)', 'back_layers': '(4)', 'back_loss_constant': '(1.0)', 'back_use_gold_trees': '(True)', 'back_loss_type': '"""kl"""', 'use_vq': '(False)', 'vq_decay': '(0.97)', 'vq_commitment': '(0.1)', 'vq_coreset_size_multiplier': '(10)', 'vq_wai... |
from methods import cusum
from methods import vae
import data_utils
import evaluate
import numpy as np
import pickle
from sklearn.decomposition import PCA
from sklearn.svm import LinearSVC
import matplotlib.pyplot as plt
def _test_transferability(model_id, test_data_ids):
# TODO Get rid of the copy-paste fro... | [
"evaluate.metrics",
"evaluate.evaluate"
] | [((4763, 4811), 'evaluate.metrics', 'evaluate.metrics', (['vae_tp', 'vae_fp', 'vae_tn', 'vae_fn'], {}), '(vae_tp, vae_fp, vae_tn, vae_fn)\n', (4779, 4811), False, 'import evaluate\n'), ((4940, 5004), 'evaluate.metrics', 'evaluate.metrics', (['vae_rnn_tp', 'vae_rnn_fp', 'vae_rnn_tn', 'vae_rnn_fn'], {}), '(vae_rnn_tp, va... |
"""Train and evaluate the model"""
import os
import torch
import utils
import random
import logging
import argparse
import torch.nn as nn
from tqdm import trange
from evaluate import evaluate
from data_loader import DataLoader
from SequenceTagger import BertForSequenceTagging
from transformers.optimization import get_l... | [
"evaluate.evaluate"
] | [((410, 435), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (433, 435), False, 'import argparse\n'), ((1027, 1049), 'utils.RunningAverage', 'utils.RunningAverage', ([], {}), '()\n', (1047, 1049), False, 'import utils\n'), ((1103, 1129), 'tqdm.trange', 'trange', (['params.train_steps'], {}), '(... |
from unittest.mock import patch, Mock, PropertyMock
from evaluate.vcf_file import VCFFile
from evaluate.vcf import NullVCFError, VCFFactory
import pytest
import pysam
from io import StringIO
@pytest.fixture
def pysam_variant_record_mock_that_maps_to_chrom_1_and_one_sample():
pysam_variant_record_mock_that_maps_to... | [
"evaluate.vcf.NullVCFError",
"evaluate.vcf_file.VCFFile"
] | [((346, 352), 'unittest.mock.Mock', 'Mock', ([], {}), '()\n', (350, 352), False, 'from unittest.mock import patch, Mock, PropertyMock\n'), ((753, 759), 'unittest.mock.Mock', 'Mock', ([], {}), '()\n', (757, 759), False, 'from unittest.mock import patch, Mock, PropertyMock\n'), ((1162, 1168), 'unittest.mock.Mock', 'Mock'... |
import argparse
import logging
import sys
from pathlib import Path
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import optim
from torch.utils.data import DataLoader, random_split
from torch.utils.tensorboard.writer import SummaryWriter
from tqdm import tqdm
import numpy as np... | [
"evaluate.evaluate"
] | [((528, 564), 'pathlib.Path', 'Path', (['"""dataset/origindataset/train/"""'], {}), "('dataset/origindataset/train/')\n", (532, 564), False, 'from pathlib import Path\n'), ((576, 612), 'pathlib.Path', 'Path', (['"""dataset/origindataset/label/"""'], {}), "('dataset/origindataset/label/')\n", (580, 612), False, 'from pa... |
# Copyright (c) Facebook, Inc. and its affiliates.
import argparse
import collections
import copy
import dotdict
import json
import numpy as np
import os
import random
import regex
import tempfile
import torch
import torch.nn as nn
from chinese_converter import to_traditional, to_simplified
from tqdm import tqdm
from... | [
"evaluate.evaluate"
] | [((402, 429), 'torch.nn.CosineSimilarity', 'nn.CosineSimilarity', ([], {'dim': '(-1)'}), '(dim=-1)\n', (421, 429), True, 'import torch.nn as nn\n'), ((801, 827), 'os.path.exists', 'os.path.exists', (['stats_path'], {}), '(stats_path)\n', (815, 827), False, 'import os\n'), ((2732, 2757), 'os.path.exists', 'os.path.exist... |
# Author: <NAME>
# Shanghai Jiao Tong University
# Code adapted from PointNetVlad code: https://github.com/jac99/MinkLoc3D.git
# Train on Oxford dataset (from PointNetVLAD paper) using BatchHard hard negative mining.
import os
import numpy as np
import open3d as o3d
import torch
from torch.utils.tensorboard import Sum... | [
"evaluate.evaluate"
] | [((1851, 1877), 'models.model_factory.model_factory', 'model_factory', (['self.params'], {}), '(self.params)\n', (1864, 1877), False, 'from models.model_factory import model_factory, load_weights\n'), ((1902, 1924), 'loss.metric_loss.make_loss', 'make_loss', (['self.params'], {}), '(self.params)\n', (1911, 1924), False... |
"""Entry point for training HSD-scripted.
Trains high-level role assignment policy with environment reward
Trains low-level action policies with role-specific rewards given by environment
"""
import json
import os
import random
import sys
import time
sys.path.append('../env/')
import numpy as np
import tensorflow a... | [
"evaluate.test_hierarchy"
] | [((254, 280), 'sys.path.append', 'sys.path.append', (['"""../env/"""'], {}), "('../env/')\n", (269, 280), False, 'import sys\n'), ((622, 642), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (636, 642), True, 'import numpy as np\n'), ((647, 664), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\... |
# ******************************************************************************
# Copyright 2017-2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apa... | [
"evaluate.Evaluate"
] | [((1288, 1313), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1311, 1313), False, 'import argparse\n'), ((3860, 3927), 'nlp_architect.data.fasttext_emb.FastTextEmb', 'FastTextEmb', (['hparams.data_dir', 'hparams.src_lang', 'hparams.vocab_size'], {}), '(hparams.data_dir, hparams.src_lang, hpar... |
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from dataset import BaseDataset
import torch
import torch.nn.functional as F
import time
import numpy as np
from config import model_name
from tqdm import tqdm
import os
from pathlib import Path
from evaluate import evaluate
impor... | [
"evaluate.evaluate"
] | [((2435, 2542), 'dataset.BaseDataset', 'BaseDataset', (['"""data/train/behaviors_parsed.tsv"""', '"""data/train/news_parsed.tsv"""', 'Config.dataset_attributes'], {}), "('data/train/behaviors_parsed.tsv', 'data/train/news_parsed.tsv',\n Config.dataset_attributes)\n", (2446, 2542), False, 'from dataset import BaseDat... |
from alfred.utils.config import parse_bool, load_dict_from_json, save_dict_to_json, parse_log_level
from alfred.utils.misc import create_logger, select_storage_dirs
from alfred.utils.directory_tree import DirectoryTree, get_root, sanity_check_exists
from alfred.utils.recorder import Recorder
from alfred.utils.plots imp... | [
"evaluate.get_evaluation_args",
"evaluate.evaluate"
] | [((781, 790), 'seaborn.set', 'sns.set', ([], {}), '()\n', (788, 790), True, 'import seaborn as sns\n'), ((791, 817), 'seaborn.set_style', 'sns.set_style', (['"""whitegrid"""'], {}), "('whitegrid')\n", (804, 817), True, 'import seaborn as sns\n'), ((859, 884), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {... |
import json
import os
from argparse import ArgumentParser
import pandas as pd
import time
import torch
from torch.utils.data import DataLoader
from torch import nn, optim
import learn2learn as l2l
from AmazonDataset import AmazonDataset
from Model import Model
from evaluate import metrics
if __name__ == '__main__'... | [
"evaluate.metrics"
] | [((335, 351), 'argparse.ArgumentParser', 'ArgumentParser', ([], {}), '()\n', (349, 351), False, 'from argparse import ArgumentParser\n'), ((3204, 3227), 'pandas.read_csv', 'pd.read_csv', (['train_path'], {}), '(train_path)\n', (3215, 3227), True, 'import pandas as pd\n'), ((3242, 3264), 'pandas.read_csv', 'pd.read_csv'... |
from train import train
from test import test
from evaluate import evaluate
if __name__ == "__main__":
#groupings = ['normal','abnormal']
groupings = ['normal','entering','abnormal']
cfg = {
'experiment': 'normal',
'train_folder': 'data/train1715/normal/',
'test_folder': '... | [
"evaluate.evaluate"
] | [((663, 721), 'test.test', 'test', (['cfg'], {'dataset': '"""test"""', 'groupings': 'groupings', 'save': '(False)'}), "(cfg, dataset='test', groupings=groupings, save=False)\n", (667, 721), False, 'from test import test\n'), ((723, 768), 'test.test', 'test', (['cfg'], {'dataset': '"""val"""', 'groupings': 'groupings'})... |
import os
import re
import sys
sys.path.append('.')
import cv2
import math
import time
import scipy
import argparse
import matplotlib
import numpy as np
import pylab as plt
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from collections import OrderedDict
from sci... | [
"evaluate.coco_eval.get_outputs"
] | [((31, 51), 'sys.path.append', 'sys.path.append', (['"""."""'], {}), "('.')\n", (46, 51), False, 'import sys\n'), ((1519, 1544), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1542, 1544), False, 'import argparse\n'), ((2009, 2033), 'lib.config.update_config', 'update_config', (['cfg', 'args']... |
"""
This script demonstrates initialisation, training, evaluation, and forecasting of ForecastNet. The dataset used for the
time-invariance test in section 6.1 of the ForecastNet paper is used for this demonstration.
Paper:
"ForecastNet: A Time-Variant Deep Feed-Forward Neural Network Architecture for Multi-Step-Ahead... | [
"evaluate.evaluate"
] | [((656, 673), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (670, 673), True, 'import numpy as np\n'), ((759, 801), 'dataHelpers.generate_data', 'generate_data', ([], {'T': '(2750)', 'period': '(50)', 'n_seqs': '(4)'}), '(T=2750, period=50, n_seqs=4)\n', (772, 801), False, 'from dataHelpers import gene... |
#!/usr/bin/env python
"""
This script launches model training in separate processes, one for each GPU.
"""
import fire
import logging
import multiprocessing
import os
import streamlit
import sys
import tarfile
import torch
from config import parse_config
from corpus import (
LanguageCorpus, BertCorpus, LowResolut... | [
"evaluate.beam_search"
] | [((488, 514), 'logging.getLogger', 'logging.getLogger', (['"""fr2en"""'], {}), "('fr2en')\n", (505, 514), False, 'import logging\n'), ((13935, 13958), 'fire.Fire', 'fire.Fire', (['PervasiveApp'], {}), '(PervasiveApp)\n', (13944, 13958), False, 'import fire\n'), ((4703, 4744), 'os.makedirs', 'os.makedirs', (['self._tmp_... |
import itertools
import json
import os
from datetime import date, datetime
from queue import Queue
from typing import List
import numpy as np
from qiskit import execute
from qiskit.providers.aer import Aer, AerJob
import config.load_config as cfg
import ibmq_account
import logger
from evaluate.circuit_gen import circ... | [
"evaluate.util.dict_to_array",
"evaluate.util.sv_to_probability",
"evaluate.circuit_gen.circ_gen"
] | [((2101, 2115), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (2113, 2115), False, 'from datetime import date, datetime\n'), ((3030, 3050), 'config.load_config.load_or_create', 'cfg.load_or_create', ([], {}), '()\n', (3048, 3050), True, 'import config.load_config as cfg\n'), ((3055, 3095), 'logger.set_log_... |
from __future__ import print_function
import torch
import os.path as osp
import time
import datetime
import torch.nn as nn
import torch.backends.cudnn as cudnn
from torch.optim import lr_scheduler
from torch.distributions import Bernoulli
from train_utils import save_model_epoch
from rewards import compute_reward_det_c... | [
"evaluate.evaluate"
] | [((861, 886), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (884, 886), False, 'import torch\n'), ((1191, 1202), 'time.time', 'time.time', ([], {}), '()\n', (1200, 1202), False, 'import time\n'), ((5936, 5980), 'torch.tensor', 'torch.tensor', (['gt_labels'], {'dtype': 'torch.float32'}), '(gt_l... |
import json
import logging
import os
import shutil
import numpy as np
import pandas as pd
import logging
import click
import torch
from torch import nn
from torch.nn import functional as F
import torch.optim as optim
from torch.nn import CrossEntropyLoss, MSELoss
from torch.nn.utils.rnn import pack_padded_sequence
f... | [
"evaluate.evaluate_model"
] | [((758, 791), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (781, 791), False, 'import warnings\n'), ((1314, 1326), 'sacred.Experiment', 'Experiment', ([], {}), '()\n', (1324, 1326), False, 'from sacred import Experiment\n'), ((1542, 1581), 'sacred.observers.SlackObserver... |
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from dataset import BaseDataset
import torch
import torch.nn as nn
import torch.nn.functional as F
import time
import numpy as np
from config import model_name
from tqdm import tqdm
import os
from pathlib import Path
from evaluate... | [
"evaluate.evaluate"
] | [((3468, 3570), 'dataset.BaseDataset', 'BaseDataset', (['"""data/train/behaviors_parsed.tsv"""', '"""data/train/news_parsed.tsv"""', '"""data/train/roberta"""'], {}), "('data/train/behaviors_parsed.tsv', 'data/train/news_parsed.tsv',\n 'data/train/roberta')\n", (3479, 3570), False, 'from dataset import BaseDataset\n... |
import os
import sys
import random
import torch
import math
import time
import argparse
import collections
import numpy as np
from torch import nn, optim
import torch.utils.data as data
import torch.nn.utils.rnn as rnn_utils
from itertools import chain
from data_process import Corpus, MyDataset, pretrain_corpus_constru... | [
"evaluate.test_evaluate",
"evaluate.valid_evaluate",
"evaluate.weighted_binary_cross_entropy"
] | [((511, 522), 'time.time', 'time.time', ([], {}), '()\n', (520, 522), False, 'import time\n'), ((682, 807), 'torch.utils.data.DataLoader', 'data.DataLoader', (['train_data'], {'collate_fn': 'train_data.my_collate', 'batch_size': 'config.batch_size', 'num_workers': '(0)', 'shuffle': '(True)'}), '(train_data, collate_fn=... |
from io import StringIO
import pandas as pd
from evaluate.classification import AlignmentAssessment
from evaluate.classifier import RecallClassifier
from evaluate.reporter import (
Reporter,
RecallReporter,
PrecisionReporter
)
from tests.common import (
create_classifier_with_two_entries,
create_c... | [
"evaluate.classifier.RecallClassifier",
"evaluate.reporter.PrecisionReporter",
"evaluate.reporter.RecallReporter"
] | [((8157, 8215), 'unittest.mock.patch.object', 'patch.object', (['Reporter', 'Reporter._generate_report.__name__'], {}), '(Reporter, Reporter._generate_report.__name__)\n', (8169, 8215), False, 'from unittest.mock import Mock, patch\n'), ((8463, 8521), 'unittest.mock.patch.object', 'patch.object', (['Reporter', 'Reporte... |
import chess
import random
from evaluate import evaluate
import copy
import os
import psutil
#count = 0
#best_move = chess.Move.from_uci("a2a3")
#best_move = None
#temp_move = None
bestMove = None
#def negamax(board: chess.Board, depth: int, max: int):
def negamax(board: chess.Board, depth: int, alpha: int, beta: in... | [
"evaluate.evaluate"
] | [((581, 596), 'evaluate.evaluate', 'evaluate', (['board'], {}), '(board)\n', (589, 596), False, 'from evaluate import evaluate\n'), ((2490, 2505), 'evaluate.evaluate', 'evaluate', (['board'], {}), '(board)\n', (2498, 2505), False, 'from evaluate import evaluate\n')] |
#!/usr/bin/env python
import torch.utils.data
import numpy as np
import random
import pickle
import matplotlib.pyplot as plt
from BERMUDA import training, testing
from pre_processing import pre_processing, read_cluster_similarity
from evaluate import evaluate_scores
from helper import cal_UMAP, plot_labels, plot_expr,... | [
"evaluate.evaluate_scores"
] | [((376, 393), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (387, 393), False, 'import random\n'), ((394, 414), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (408, 414), True, 'import numpy as np\n'), ((1424, 1434), 'matplotlib.pyplot.ioff', 'plt.ioff', ([], {}), '()\n', (1432, 1434), T... |
import os
from datetime import datetime
import numpy as np
from torch.nn.utils import clip_grad_norm_
from torch.nn.utils.rnn import pad_sequence
from torch.optim.lr_scheduler import StepLR
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm
from evaluate im... | [
"evaluate.evaluate"
] | [((726, 760), 'os.makedirs', 'os.makedirs', (['logdir'], {'exist_ok': '(True)'}), '(logdir, exist_ok=True)\n', (737, 760), False, 'import os\n'), ((774, 795), 'torch.utils.tensorboard.SummaryWriter', 'SummaryWriter', (['logdir'], {}), '(logdir)\n', (787, 795), False, 'from torch.utils.tensorboard import SummaryWriter\n... |
import os
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
from tqdm import tqdm, trange
from transformers import AutoConfig, AutoTokenizer
from modeling import BertForSentimentClassification, AlbertForSentimentClassification, DistilBertForSentimentClassification
f... | [
"evaluate.evaluate"
] | [((508, 542), 'tqdm.trange', 'trange', (['args.num_eps'], {'desc': '"""Epoch"""'}), "(args.num_eps, desc='Epoch')\n", (514, 542), False, 'from tqdm import tqdm, trange\n'), ((1729, 1780), 'transformers.AutoConfig.from_pretrained', 'AutoConfig.from_pretrained', (['args.model_name_or_path'], {}), '(args.model_name_or_pat... |
import argparse
import torch
import json, ast
#from matplotlib import pyplot as plt
from osc_server import FlowServer
from osc_utils import generate_dataset
from utils.data import load_dataset
from evaluate import evaluate_dimensions, evaluate_dataset
from torch.utils.data import DataLoader
import numpy as np
import os... | [
"evaluate.evaluate_dimensions",
"evaluate.evaluate_dataset"
] | [((394, 419), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (417, 419), False, 'import argparse\n'), ((2635, 2712), 'torch.utils.data.DataLoader', 'DataLoader', (['train_loader.dataset'], {'batch_size': '(64)', 'shuffle': '(False)', 'num_workers': '(2)'}), '(train_loader.dataset, batch_size=64... |
import random
import numpy as np
import os
import logging
import torch
from utilities import get_device, current_utc_time
import pandas as pd
from imp import reload
from data_loader import get_loader, prepare_dataset
from transformers import AdamW, get_linear_schedule_with_warmup
from models import get_model
from train... | [
"evaluate.evaluate_model"
] | [((476, 491), 'imp.reload', 'reload', (['logging'], {}), '(logging)\n', (482, 491), False, 'from imp import reload\n'), ((610, 648), 'pandas.read_excel', 'pd.read_excel', (['"""./data/P3-Golden.xlsx"""'], {}), "('./data/P3-Golden.xlsx')\n", (623, 648), True, 'import pandas as pd\n'), ((726, 744), 'utilities.current_utc... |
import pickle
import gzip
from sparse_gp import SparseGP
import scipy.stats as sps
import numpy as np
import sys
import os
sys.path.append('%s/../prog_common' % os.path.dirname(os.path.realpath(__file__)))
from cmd_args import cmd_args
sys.path.append('%s/../prog_eval' % os.path.dirname(os.path.realpath(__file__))... | [
"evaluate.eval_at",
"evaluate.get_parser",
"evaluate.parse",
"evaluate.tokenize"
] | [((516, 558), 'evaluate.get_parser', 'evaluate.get_parser', (['cmd_args.grammar_file'], {}), '(cmd_args.grammar_file)\n', (535, 558), False, 'import evaluate\n'), ((2674, 2735), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Argparser for encoding"""'}), "(description='Argparser for enco... |
import argparse
import os
import math
import json
from datetime import datetime
from models import models
from db import db, Result
from uuid import uuid4, UUID
from keras import backend as K
import numpy as np
import evaluate
from data_gen import data
from config import config
def test_model(model, train, validat... | [
"evaluate.transform_binary_probabilities",
"evaluate.get_results",
"evaluate.get_labels",
"evaluate.load"
] | [((749, 788), 'evaluate.get_results', 'evaluate.get_results', (['model', 'validation'], {}), '(model, validation)\n', (769, 788), False, 'import evaluate\n'), ((860, 893), 'evaluate.get_results', 'evaluate.get_results', (['model', 'test'], {}), '(model, test)\n', (880, 893), False, 'import evaluate\n'), ((1770, 1813), ... |
from evaluate import evaluate_model
from models import *
from train import train_model
from visualize import visualize_model_performance
'''
Driver script for part 2 of the assignment'''
num_epochs = 50
model = starter_model()
default_model_path = train_model(model, str(model.name) + "_p2_default_weights", num_e... | [
"evaluate.evaluate_model"
] | [((590, 624), 'evaluate.evaluate_model', 'evaluate_model', (['default_model_path'], {}), '(default_model_path)\n', (604, 624), False, 'from evaluate import evaluate_model\n'), ((625, 678), 'evaluate.evaluate_model', 'evaluate_model', (['reduced_background_path', '[0.01, 0, 1]'], {}), '(reduced_background_path, [0.01, 0... |
import json
import logging
import os
import tensorflow as tf
import evaluate
import model
import preprocessing
import train
import utils
logger = logging.getLogger('eval_from_ckpt')
logger.setLevel(logging.INFO)
str_1 = "_1294"
str_2 = "_2588"
str_3 = "_3882"
str_4 = ""
_CKPT_TO_EPOCH_BATCH_414 = {
1: -1,
2:... | [
"evaluate.evaluate",
"evaluate.eval_adverts"
] | [((148, 183), 'logging.getLogger', 'logging.getLogger', (['"""eval_from_ckpt"""'], {}), "('eval_from_ckpt')\n", (165, 183), False, 'import logging\n'), ((2008, 2029), 'utils.get_data_path', 'utils.get_data_path', ([], {}), '()\n', (2027, 2029), False, 'import utils\n'), ((2112, 2175), 'utils.get_caption_image_names', '... |
import os
import tensorflow as tf
from train import train
from evaluate import evaluate
from data_structure import load_data
flags = tf.app.flags
flags.DEFINE_string('gpu', '0', 'visible gpu')
flags.DEFINE_string('mode', 'train', 'set train or eval')
flags.DEFINE_string('datadir', 'data', 'directory of input data... | [
"evaluate.evaluate"
] | [((2083, 2100), 'data_structure.load_data', 'load_data', (['config'], {}), '(config)\n', (2092, 2100), False, 'from data_structure import load_data\n'), ((2959, 2971), 'tensorflow.app.run', 'tf.app.run', ([], {}), '()\n', (2969, 2971), True, 'import tensorflow as tf\n'), ((2768, 2853), 'train.train', 'train', (['config... |
"""
This evaluates how the number of preceding POS tags affects the evaluation results.
"""
from config import base
import evaluate as e
config = base.get_config()
config['test_filepath'] = 'resources/test/teddev/data-with-doc.csv'
n_tags = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
for n_tag in n_tags:
print("Running ... | [
"evaluate.evaluate",
"evaluate.load_data"
] | [((148, 165), 'config.base.get_config', 'base.get_config', ([], {}), '()\n', (163, 165), False, 'from config import base\n'), ((490, 508), 'evaluate.evaluate', 'e.evaluate', (['config'], {}), '(config)\n', (500, 508), True, 'import evaluate as e\n'), ((525, 561), 'evaluate.load_data', 'e.load_data', (["config['test_fil... |
from evaluate import evalb
from trees import load_trees
def test(tree_path='data/22.auto.clean', evalb_path='EVALB'):
dev_trees = load_trees(tree_path)
score = evalb(evalb_path, dev_trees, dev_trees)
spec = locals()
spec.pop('dev_trees')
for key, val in spec.items():
print(key, val)
test() | [
"evaluate.evalb"
] | [((132, 153), 'trees.load_trees', 'load_trees', (['tree_path'], {}), '(tree_path)\n', (142, 153), False, 'from trees import load_trees\n'), ((163, 202), 'evaluate.evalb', 'evalb', (['evalb_path', 'dev_trees', 'dev_trees'], {}), '(evalb_path, dev_trees, dev_trees)\n', (168, 202), False, 'from evaluate import evalb\n')] |
# import ants
from sitkImageIO.itkdatawriter import sitk_write_lab,sitk_write_image
import numpy as np
import SimpleITK as sitk
import os
from dirutil.helper import mkdir_if_not_exist
from dirutil.helper import sort_glob
from preprocessor.tools import rescale_one_dir
from evaluate.metric import calculate_binary_hd,calc... | [
"evaluate.metric.calculate_binary_dice",
"evaluate.metric.print_mean_and_std"
] | [((986, 1047), 'dirutil.helper.sort_glob', 'sort_glob', (["(args.dataset_dir + '/train_atlas/rez/img/*.nii.gz')"], {}), "(args.dataset_dir + '/train_atlas/rez/img/*.nii.gz')\n", (995, 1047), False, 'from dirutil.helper import sort_glob\n'), ((1062, 1127), 'dirutil.helper.sort_glob', 'sort_glob', (["(args.dataset_dir + ... |
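
The rows above pair each source file with the `evaluate.*` names it calls (the `apis` column) and positional extraction tuples (the `extract_api` column), which record, among other things, a character span, a fully qualified name, the call text, and the originating import statement. The dataset's actual extraction pipeline and full tuple schema are not shown in this dump. As an illustration only, the following sketch uses Python's `ast` module to recover a simplified version of those tuples, restricted to a single assumed target package for brevity, whereas the real `extract_api` column evidently covers every imported package. The names `TARGET` and `extract_api_calls` are invented for this sketch and do not appear in the dataset.

```python
import ast

TARGET = "evaluate"  # hypothetical target package; the real dataset tracks every import


def extract_api_calls(source: str):
    """Return (char_span, qualified_name, import_statement) for each call whose
    callee was imported from TARGET. Simplified: one module-level alias table,
    no scoping, no star imports; requires Python 3.8+ for end positions."""
    tree = ast.parse(source)

    # Absolute character offset of the start of each line, so ast's
    # (lineno, col_offset) pairs can be converted into string spans
    # like the (start, end) pairs seen in the extract_api column.
    line_starts = [0]
    for line in source.splitlines(keepends=True):
        line_starts.append(line_starts[-1] + len(line))

    def span(node):
        start = line_starts[node.lineno - 1] + node.col_offset
        end = line_starts[node.end_lineno - 1] + node.end_col_offset
        return (start, end)

    # Pass 1: map each locally bound name to (qualified base, import statement).
    aliases = {}
    for node in ast.walk(tree):
        if isinstance(node, ast.Import):
            for alias in node.names:
                if alias.name == TARGET or alias.name.startswith(TARGET + "."):
                    # `import pkg.sub` binds the root name; `import pkg as p` binds p.
                    bound = alias.asname or alias.name.split(".")[0]
                    base = alias.name if alias.asname else TARGET
                    aliases[bound] = (base, ast.get_source_segment(source, node))
        elif isinstance(node, ast.ImportFrom):
            if node.module and (node.module == TARGET
                                or node.module.startswith(TARGET + ".")):
                for alias in node.names:
                    aliases[alias.asname or alias.name] = (
                        f"{node.module}.{alias.name}",
                        ast.get_source_segment(source, node))

    # Pass 2: resolve each call's attribute chain back to an imported name.
    calls = []
    for node in ast.walk(tree):
        if not isinstance(node, ast.Call):
            continue
        func, parts = node.func, []
        while isinstance(func, ast.Attribute):
            parts.append(func.attr)
            func = func.value
        if isinstance(func, ast.Name) and func.id in aliases:
            base, imp = aliases[func.id]
            qualified = ".".join([base] + parts[::-1])
            calls.append((span(node), qualified, imp))
    return sorted(calls)


if __name__ == "__main__":
    # Mirrors the chess row above: a call to evaluate(board) resolves to
    # 'evaluate.evaluate' with the span of the full call expression.
    sample = (
        "from evaluate import evaluate\n"
        "import chess\n"
        "def negamax(board):\n"
        "    return evaluate(board)\n"
    )
    print(extract_api_calls(sample))
    # -> [((74, 89), 'evaluate.evaluate', 'from evaluate import evaluate')]
```

The two-pass design mirrors what the extraction tuples imply: imports are resolved first so that a dotted call such as `tf.app.run()` can be rewritten to its fully qualified form (`tensorflow.app.run` in the rows above) before being recorded with its span and import statement.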