Dataset columns (field, type, value range):
code       stringlengths   66–870k
docstring  stringlengths   19–26.7k
func_name  stringlengths   1–138
language   stringclasses   1 value
repo       stringlengths   7–68
path       stringlengths   5–324
url        stringlengths   46–389
license    stringclasses   7 values
def get_arguments(): """Parse all the arguments provided from the CLI. Returns: A list of parsed arguments. """ parser = argparse.ArgumentParser(description="DeepLab-ResNet Network") parser.add_argument("--batch-size", type=int, default=BATCH_SIZE, help="Number of images sent to the network in one step.") parser.add_argument("--feat-dir", type=str, default=FEATSAVE_DIR, help="Path to the directory to save the semantic embedding vector map.") parser.add_argument("--data-dir", type=str, default=DATA_DIRECTORY, help="Path to the directory containing the PASCAL VOC dataset.") parser.add_argument("--grad-update-every", type=int, default=GRAD_UPDATE_EVERY, help="Number of steps after which gradient update is applied.") parser.add_argument("--ignore-label", type=int, default=IGNORE_LABEL, help="The index of the label to ignore during the training.") parser.add_argument("--input-size", type=str, default=INPUT_SIZE, help="Comma-separated string with height and width of images.") parser.add_argument("--is-training", action="store_true", help="Whether to update the running means and variances during the training.") parser.add_argument("--learning-rate", type=float, default=LEARNING_RATE, help="Base learning rate for training with polynomial decay.") parser.add_argument("--momentum", type=float, default=MOMENTUM, help="Momentum component of the optimiser.") parser.add_argument("--not-restore-last", action="store_true", help="Whether to not restore last (FC) layers.") parser.add_argument("--num-classes", type=int, default=NUM_CLASSES, help="Number of classes to predict (including background).") parser.add_argument("--num-steps", type=int, default=NUM_STEPS, help="Number of training steps.") parser.add_argument("--power", type=float, default=POWER, help="Decay parameter to compute the learning rate.") parser.add_argument("--random-mirror", action="store_true", help="Whether to randomly mirror the inputs during the training.") parser.add_argument("--random-scale", action="store_true", help="Whether to randomly scale the inputs during the training.") parser.add_argument("--random-seed", type=int, default=RANDOM_SEED, help="Random seed to have reproducible results.") parser.add_argument("--restore-from", type=str, default=RESTORE_FROM, help="Where to restore model parameters from.") parser.add_argument("--save-num-images", type=int, default=SAVE_NUM_IMAGES, help="How many images to save.") parser.add_argument("--save-pred-every", type=int, default=SAVE_PRED_EVERY, help="How often to save summaries and checkpoints.") parser.add_argument("--snapshot-dir", type=str, default=SNAPSHOT_DIR, help="Where to save snapshots of the model.") parser.add_argument("--weight-decay", type=float, default=WEIGHT_DECAY, help="Regularisation parameter for L2-loss.") return parser.parse_args()
Parse all the arguments provided from the CLI. Returns: A list of parsed arguments.
get_arguments
python
iyah4888/SIGGRAPH18SSS
parse_opt.py
https://github.com/iyah4888/SIGGRAPH18SSS/blob/master/parse_opt.py
MIT
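A minimal usage sketch for the row above. The parser references module-level defaults defined elsewhere in parse_opt.py; the constant values below are illustrative stand-ins, not the repository's actual settings.

import argparse

# Illustrative stand-ins for the module-level defaults referenced by get_arguments().
BATCH_SIZE = 10
FEATSAVE_DIR = './features'
DATA_DIRECTORY = './VOCdevkit'
GRAD_UPDATE_EVERY = 10
IGNORE_LABEL = 255
INPUT_SIZE = '321,321'
LEARNING_RATE = 2.5e-4
MOMENTUM = 0.9
NUM_CLASSES = 21
NUM_STEPS = 20001
POWER = 0.9
RANDOM_SEED = 1234
RESTORE_FROM = './deeplab_resnet.ckpt'
SAVE_NUM_IMAGES = 2
SAVE_PRED_EVERY = 1000
SNAPSHOT_DIR = './snapshots'
WEIGHT_DECAY = 0.0005

args = get_arguments()
h, w = map(int, args.input_size.split(','))  # "--input-size" arrives as a "height,width" string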
def __init__(self, sess, args): """Initialize the parameters. sess: tensorflow session args: parsed command-line arguments """ self.sess = sess self.batch_size = args.batch_size self.args = args # parameters used to save a checkpoint self.dataset = "Hypcol" self.options = [] self._attrs = ['batch_size', 'dataset'] self.build_model()
Initialize the parameters. sess: tensorflow session args: parsed command-line arguments
__init__
python
iyah4888/SIGGRAPH18SSS
deeplab_resnet/hc_deeplab.py
https://github.com/iyah4888/SIGGRAPH18SSS/blob/master/deeplab_resnet/hc_deeplab.py
MIT
def image_scaling(img, label): """ Randomly scales the images between 0.5 and 1.5 times the original size. Args: img: Training image to scale. label: Segmentation mask to scale. """ scale = tf.random_uniform([1], minval=0.5, maxval=1.5, dtype=tf.float32, seed=None) h_new = tf.to_int32(tf.multiply(tf.to_float(tf.shape(img)[0]), scale)) w_new = tf.to_int32(tf.multiply(tf.to_float(tf.shape(img)[1]), scale)) new_shape = tf.squeeze(tf.stack([h_new, w_new]), squeeze_dims=[1]) img = tf.image.resize_images(img, new_shape) label = tf.image.resize_nearest_neighbor(tf.expand_dims(label, 0), new_shape) label = tf.squeeze(label, squeeze_dims=[0]) return img, label
Randomly scales the images between 0.5 and 1.5 times the original size. Args: img: Training image to scale. label: Segmentation mask to scale.
image_scaling
python
iyah4888/SIGGRAPH18SSS
deeplab_resnet/image_reader.py
https://github.com/iyah4888/SIGGRAPH18SSS/blob/master/deeplab_resnet/image_reader.py
MIT
def image_mirroring(img, label): """ Randomly mirrors the images. Args: img: Training image to mirror. label: Segmentation mask to mirror. """ distort_left_right_random = tf.random_uniform([1], 0, 1.0, dtype=tf.float32)[0] mirror = tf.less(tf.stack([1.0, distort_left_right_random, 1.0]), 0.5) mirror = tf.boolean_mask([0, 1, 2], mirror) img = tf.reverse(img, mirror) label = tf.reverse(label, mirror) return img, label
Randomly mirrors the images. Args: img: Training image to mirror. label: Segmentation mask to mirror.
image_mirroring
python
iyah4888/SIGGRAPH18SSS
deeplab_resnet/image_reader.py
https://github.com/iyah4888/SIGGRAPH18SSS/blob/master/deeplab_resnet/image_reader.py
MIT
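The mask construction in image_mirroring is worth unpacking: only the middle element of [1.0, rand, 1.0] can fall below 0.5, so tf.boolean_mask([0, 1, 2], mirror) yields either [] or [1], and tf.reverse flips the width axis with probability 0.5 while leaving height and channels alone. A NumPy sketch of the same selection logic:

import numpy as np

rng = np.random.default_rng(0)
r = rng.uniform(0.0, 1.0)
mirror = np.array([1.0, r, 1.0]) < 0.5   # only index 1 can ever be True
axes = np.array([0, 1, 2])[mirror]       # [] or [1]
img = np.arange(12).reshape(2, 3, 2)     # toy H x W x C tensor
flipped = np.flip(img, axis=tuple(axes)) if axes.size else img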
def random_crop_and_pad_image_and_labels(image, label, crop_h, crop_w, ignore_label=255): """ Randomly crops and pads the input images. Args: image: Training image to crop/pad. label: Segmentation mask to crop/pad. crop_h: Height of cropped segment. crop_w: Width of cropped segment. ignore_label: Label to ignore during the training. """ label = tf.cast(label, dtype=tf.float32) label = label - ignore_label # Needs to be subtracted and later added due to 0 padding. combined = tf.concat(axis=2, values=[image, label]) image_shape = tf.shape(image) combined_pad = tf.image.pad_to_bounding_box(combined, 0, 0, tf.maximum(crop_h, image_shape[0]), tf.maximum(crop_w, image_shape[1])) last_image_dim = tf.shape(image)[-1] last_label_dim = tf.shape(label)[-1] combined_crop = tf.random_crop(combined_pad, [crop_h, crop_w, 4]) img_crop = combined_crop[:, :, :last_image_dim] label_crop = combined_crop[:, :, last_image_dim:] label_crop = label_crop + ignore_label label_crop = tf.cast(label_crop, dtype=tf.uint8) # Set static shape so that tensorflow knows shape at compile time. img_crop.set_shape((crop_h, crop_w, 3)) label_crop.set_shape((crop_h, crop_w, 1)) return img_crop, label_crop
Randomly crops and pads the input images. Args: image: Training image to crop/pad. label: Segmentation mask to crop/pad. crop_h: Height of cropped segment. crop_w: Width of cropped segment. ignore_label: Label to ignore during the training.
random_crop_and_pad_image_and_labels
python
iyah4888/SIGGRAPH18SSS
deeplab_resnet/image_reader.py
https://github.com/iyah4888/SIGGRAPH18SSS/blob/master/deeplab_resnet/image_reader.py
MIT
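The subtract-then-add around ignore_label exists because tf.image.pad_to_bounding_box pads with zeros: shifting real labels down by ignore_label first guarantees that every padded pixel equals exactly ignore_label once the offset is restored. A NumPy sketch of the trick (shapes and values are illustrative):

import numpy as np

ignore_label = 255
label = np.array([[0., 1.], [2., 3.]])        # tiny 2x2 mask
shifted = label - ignore_label                # move real labels away from 0
padded = np.zeros((3, 3))                     # zero padding, as pad_to_bounding_box does
padded[:2, :2] = shifted
restored = padded + ignore_label              # padded pixels come back as ignore_label
assert restored[2, 2] == 255.0 and restored[0, 1] == 1.0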
def read_labeled_image_list(data_dir, data_list): """Reads a text file containing paths to images and ground truth masks. Args: data_dir: path to the directory with images and masks. data_list: path to the file with lines of the form '/path/to/image /path/to/mask'. Returns: Two lists with all file names for images and masks, respectively. """ images = [] masks = [] with open(data_list, 'r') as f: for line in f: try: image, mask = line.strip("\n").split(' ') except ValueError: # Ad hoc for test lists that carry a single path per line. image = mask = line.strip("\n") images.append(os.path.join(data_dir, image)) masks.append(os.path.join(data_dir, mask)) return images, masks
Reads a text file containing paths to images and ground truth masks. Args: data_dir: path to the directory with images and masks. data_list: path to the file with lines of the form '/path/to/image /path/to/mask'. Returns: Two lists with all file names for images and masks, respectively.
read_labeled_image_list
python
iyah4888/SIGGRAPH18SSS
deeplab_resnet/image_reader.py
https://github.com/iyah4888/SIGGRAPH18SSS/blob/master/deeplab_resnet/image_reader.py
MIT
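For reference, the data_list file consumed above carries one space-separated image/mask pair per line. A hypothetical example (file contents shown as comments, paths invented for illustration):

# train_list.txt:
#   JPEGImages/2007_000032.jpg SegmentationClassAug/2007_000032.png
#   JPEGImages/2007_000039.jpg SegmentationClassAug/2007_000039.png
images, masks = read_labeled_image_list('/data/VOCdevkit/VOC2012', 'train_list.txt')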
def read_images_from_disk(input_queue, input_size, random_scale, random_mirror, ignore_label, img_mean): # optional pre-processing arguments """Read one image and its corresponding mask with optional pre-processing. Args: input_queue: tf queue with paths to the image and its mask. input_size: a tuple with (height, width) values. If not given, return images of original size. random_scale: whether to randomly scale the images prior to random crop. random_mirror: whether to randomly mirror the images prior to random crop. ignore_label: index of label to ignore during the training. img_mean: vector of mean colour values. Returns: Two tensors: the decoded image and its mask. """ img_contents = tf.read_file(input_queue[0]) label_contents = tf.read_file(input_queue[1]) img = tf.image.decode_jpeg(img_contents, channels=3) img_r, img_g, img_b = tf.split(axis=2, num_or_size_splits=3, value=img) img = tf.cast(tf.concat(axis=2, values=[img_b, img_g, img_r]), dtype=tf.float32) # Extract mean. img -= img_mean label = tf.image.decode_png(label_contents, channels=1) if input_size is not None: h, w = input_size # Randomly scale the images and labels. if random_scale: img, label = image_scaling(img, label) # Randomly mirror the images and labels. if random_mirror: img, label = image_mirroring(img, label) # Randomly crops the images and labels. img, label = random_crop_and_pad_image_and_labels(img, label, h, w, ignore_label) return img, label
Read one image and its corresponding mask with optional pre-processing. Args: input_queue: tf queue with paths to the image and its mask. input_size: a tuple with (height, width) values. If not given, return images of original size. random_scale: whether to randomly scale the images prior to random crop. random_mirror: whether to randomly mirror the images prior to random crop. ignore_label: index of label to ignore during the training. img_mean: vector of mean colour values. Returns: Two tensors: the decoded image and its mask.
read_images_from_disk
python
iyah4888/SIGGRAPH18SSS
deeplab_resnet/image_reader.py
https://github.com/iyah4888/SIGGRAPH18SSS/blob/master/deeplab_resnet/image_reader.py
MIT
def __init__(self, data_dir, data_list, input_size, random_scale, random_mirror, ignore_label, img_mean, coord): '''Initialise an ImageReader. Args: data_dir: path to the directory with images and masks. data_list: path to the file with lines of the form '/path/to/image /path/to/mask'. input_size: a tuple with (height, width) values, to which all the images will be resized. random_scale: whether to randomly scale the images prior to random crop. random_mirror: whether to randomly mirror the images prior to random crop. ignore_label: index of label to ignore during the training. img_mean: vector of mean colour values. coord: TensorFlow queue coordinator. ''' self.data_dir = data_dir self.data_list = data_list self.input_size = input_size self.coord = coord self.image_list, self.label_list = read_labeled_image_list(self.data_dir, self.data_list) self.images = tf.convert_to_tensor(self.image_list, dtype=tf.string) self.labels = tf.convert_to_tensor(self.label_list, dtype=tf.string) self.queue = tf.train.slice_input_producer([self.images, self.labels], shuffle=input_size is not None) # not shuffling if it is val self.image, self.label = read_images_from_disk(self.queue, self.input_size, random_scale, random_mirror, ignore_label, img_mean)
Initialise an ImageReader. Args: data_dir: path to the directory with images and masks. data_list: path to the file with lines of the form '/path/to/image /path/to/mask'. input_size: a tuple with (height, width) values, to which all the images will be resized. random_scale: whether to randomly scale the images prior to random crop. random_mirror: whether to randomly mirror the images prior to random crop. ignore_label: index of label to ignore during the training. img_mean: vector of mean colour values. coord: TensorFlow queue coordinator.
__init__
python
iyah4888/SIGGRAPH18SSS
deeplab_resnet/image_reader.py
https://github.com/iyah4888/SIGGRAPH18SSS/blob/master/deeplab_resnet/image_reader.py
MIT
def dequeue(self, num_elements): '''Pack images and labels into a batch. Args: num_elements: the batch size. Returns: Two tensors of size (batch_size, h, w, {3, 1}) for images and masks.''' image_batch, label_batch = tf.train.batch([self.image, self.label], num_elements) return image_batch, label_batch
Pack images and labels into a batch. Args: num_elements: the batch size. Returns: Two tensors of size (batch_size, h, w, {3, 1}) for images and masks.
dequeue
python
iyah4888/SIGGRAPH18SSS
deeplab_resnet/image_reader.py
https://github.com/iyah4888/SIGGRAPH18SSS/blob/master/deeplab_resnet/image_reader.py
MIT
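A sketch of how the queue-based reader and dequeue fit together in a TF1 training script. Paths are placeholders, and IMG_MEAN uses the BGR mean values conventional for DeepLab-style pipelines (an assumption, not taken from this file):

import numpy as np
import tensorflow as tf

IMG_MEAN = np.array((104.00698793, 116.66876762, 122.67891434), dtype=np.float32)

coord = tf.train.Coordinator()
reader = ImageReader('/data/VOCdevkit/VOC2012', 'train_list.txt', (321, 321),
                     random_scale=True, random_mirror=True,
                     ignore_label=255, img_mean=IMG_MEAN, coord=coord)
image_batch, label_batch = reader.dequeue(num_elements=10)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    threads = tf.train.start_queue_runners(coord=coord, sess=sess)
    imgs, labels = sess.run([image_batch, label_batch])  # (10, 321, 321, 3) and (10, 321, 321, 1)
    coord.request_stop()
    coord.join(threads)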
def setup(self, is_training, num_classes): '''Network definition. Args: is_training: whether to update the running mean and variance of the batch normalisation layer. If the batch size is small, it is better to keep the running mean and variance of the-pretrained model frozen. num_classes: number of classes to predict (including background). ''' (self.feed('data') .conv(7, 7, 64, 2, 2, biased=False, relu=False, name='conv1') .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn_conv1') .max_pool(3, 3, 2, 2, name='pool1') .conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res2a_branch1') .batch_normalization(is_training=is_training, activation_fn=None, name='bn2a_branch1')) (self.feed('pool1') .conv(1, 1, 64, 1, 1, biased=False, relu=False, name='res2a_branch2a') .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn2a_branch2a') .conv(3, 3, 64, 1, 1, biased=False, relu=False, name='res2a_branch2b') .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn2a_branch2b') .conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res2a_branch2c') .batch_normalization(is_training=is_training, activation_fn=None, name='bn2a_branch2c')) (self.feed('bn2a_branch1', 'bn2a_branch2c') .add(name='res2a') .relu(name='res2a_relu') .conv(1, 1, 64, 1, 1, biased=False, relu=False, name='res2b_branch2a') .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn2b_branch2a') .conv(3, 3, 64, 1, 1, biased=False, relu=False, name='res2b_branch2b') .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn2b_branch2b') .conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res2b_branch2c') .batch_normalization(is_training=is_training, activation_fn=None, name='bn2b_branch2c')) (self.feed('res2a_relu', 'bn2b_branch2c') .add(name='res2b') .relu(name='res2b_relu') .conv(1, 1, 64, 1, 1, biased=False, relu=False, name='res2c_branch2a') .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn2c_branch2a') .conv(3, 3, 64, 1, 1, biased=False, relu=False, name='res2c_branch2b') .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn2c_branch2b') .conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res2c_branch2c') .batch_normalization(is_training=is_training, activation_fn=None, name='bn2c_branch2c')) (self.feed('res2b_relu', 'bn2c_branch2c') .add(name='res2c') .relu(name='res2c_relu') .conv(1, 1, 512, 2, 2, biased=False, relu=False, name='res3a_branch1') .batch_normalization(is_training=is_training, activation_fn=None, name='bn3a_branch1')) (self.feed('res2c_relu') .conv(1, 1, 128, 2, 2, biased=False, relu=False, name='res3a_branch2a') .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn3a_branch2a') .conv(3, 3, 128, 1, 1, biased=False, relu=False, name='res3a_branch2b') .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn3a_branch2b') .conv(1, 1, 512, 1, 1, biased=False, relu=False, name='res3a_branch2c') .batch_normalization(is_training=is_training, activation_fn=None, name='bn3a_branch2c')) (self.feed('bn3a_branch1', 'bn3a_branch2c') .add(name='res3a') .relu(name='res3a_relu') .conv(1, 1, 128, 1, 1, biased=False, relu=False, name='res3b1_branch2a') .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn3b1_branch2a') .conv(3, 3, 128, 1, 1, biased=False, relu=False, name='res3b1_branch2b') .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, 
name='bn3b1_branch2b') .conv(1, 1, 512, 1, 1, biased=False, relu=False, name='res3b1_branch2c') .batch_normalization(is_training=is_training, activation_fn=None, name='bn3b1_branch2c')) (self.feed('res3a_relu', 'bn3b1_branch2c') .add(name='res3b1') .relu(name='res3b1_relu') .conv(1, 1, 128, 1, 1, biased=False, relu=False, name='res3b2_branch2a') .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn3b2_branch2a') .conv(3, 3, 128, 1, 1, biased=False, relu=False, name='res3b2_branch2b') .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn3b2_branch2b') .conv(1, 1, 512, 1, 1, biased=False, relu=False, name='res3b2_branch2c') .batch_normalization(is_training=is_training, activation_fn=None, name='bn3b2_branch2c')) (self.feed('res3b1_relu', 'bn3b2_branch2c') .add(name='res3b2') .relu(name='res3b2_relu') .conv(1, 1, 128, 1, 1, biased=False, relu=False, name='res3b3_branch2a') .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn3b3_branch2a') .conv(3, 3, 128, 1, 1, biased=False, relu=False, name='res3b3_branch2b') .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn3b3_branch2b') .conv(1, 1, 512, 1, 1, biased=False, relu=False, name='res3b3_branch2c') .batch_normalization(is_training=is_training, activation_fn=None, name='bn3b3_branch2c')) (self.feed('res3b2_relu', 'bn3b3_branch2c') .add(name='res3b3') .relu(name='res3b3_relu') .conv(1, 1, 1024, 1, 1, biased=False, relu=False, name='res4a_branch1') .batch_normalization(is_training=is_training, activation_fn=None, name='bn4a_branch1')) (self.feed('res3b3_relu') .conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res4a_branch2a') .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4a_branch2a') .atrous_conv(3, 3, 256, 2, padding='SAME', biased=False, relu=False, name='res4a_branch2b') .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4a_branch2b') .conv(1, 1, 1024, 1, 1, biased=False, relu=False, name='res4a_branch2c') .batch_normalization(is_training=is_training, activation_fn=None, name='bn4a_branch2c')) (self.feed('bn4a_branch1', 'bn4a_branch2c') .add(name='res4a') .relu(name='res4a_relu') .conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res4b1_branch2a') .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4b1_branch2a') .atrous_conv(3, 3, 256, 2, padding='SAME', biased=False, relu=False, name='res4b1_branch2b') .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4b1_branch2b') .conv(1, 1, 1024, 1, 1, biased=False, relu=False, name='res4b1_branch2c') .batch_normalization(is_training=is_training, activation_fn=None, name='bn4b1_branch2c')) (self.feed('res4a_relu', 'bn4b1_branch2c') .add(name='res4b1') .relu(name='res4b1_relu') .conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res4b2_branch2a') .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4b2_branch2a') .atrous_conv(3, 3, 256, 2, padding='SAME', biased=False, relu=False, name='res4b2_branch2b') .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4b2_branch2b') .conv(1, 1, 1024, 1, 1, biased=False, relu=False, name='res4b2_branch2c') .batch_normalization(is_training=is_training, activation_fn=None, name='bn4b2_branch2c')) (self.feed('res4b1_relu', 'bn4b2_branch2c') .add(name='res4b2') .relu(name='res4b2_relu') .conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res4b3_branch2a') 
.batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4b3_branch2a') .atrous_conv(3, 3, 256, 2, padding='SAME', biased=False, relu=False, name='res4b3_branch2b') .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4b3_branch2b') .conv(1, 1, 1024, 1, 1, biased=False, relu=False, name='res4b3_branch2c') .batch_normalization(is_training=is_training, activation_fn=None, name='bn4b3_branch2c')) (self.feed('res4b2_relu', 'bn4b3_branch2c') .add(name='res4b3') .relu(name='res4b3_relu') .conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res4b4_branch2a') .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4b4_branch2a') .atrous_conv(3, 3, 256, 2, padding='SAME', biased=False, relu=False, name='res4b4_branch2b') .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4b4_branch2b') .conv(1, 1, 1024, 1, 1, biased=False, relu=False, name='res4b4_branch2c') .batch_normalization(is_training=is_training, activation_fn=None, name='bn4b4_branch2c')) (self.feed('res4b3_relu', 'bn4b4_branch2c') .add(name='res4b4') .relu(name='res4b4_relu') .conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res4b5_branch2a') .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4b5_branch2a') .atrous_conv(3, 3, 256, 2, padding='SAME', biased=False, relu=False, name='res4b5_branch2b') .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4b5_branch2b') .conv(1, 1, 1024, 1, 1, biased=False, relu=False, name='res4b5_branch2c') .batch_normalization(is_training=is_training, activation_fn=None, name='bn4b5_branch2c')) (self.feed('res4b4_relu', 'bn4b5_branch2c') .add(name='res4b5') .relu(name='res4b5_relu') .conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res4b6_branch2a') .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4b6_branch2a') .atrous_conv(3, 3, 256, 2, padding='SAME', biased=False, relu=False, name='res4b6_branch2b') .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4b6_branch2b') .conv(1, 1, 1024, 1, 1, biased=False, relu=False, name='res4b6_branch2c') .batch_normalization(is_training=is_training, activation_fn=None, name='bn4b6_branch2c')) (self.feed('res4b5_relu', 'bn4b6_branch2c') .add(name='res4b6') .relu(name='res4b6_relu') .conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res4b7_branch2a') .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4b7_branch2a') .atrous_conv(3, 3, 256, 2, padding='SAME', biased=False, relu=False, name='res4b7_branch2b') .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4b7_branch2b') .conv(1, 1, 1024, 1, 1, biased=False, relu=False, name='res4b7_branch2c') .batch_normalization(is_training=is_training, activation_fn=None, name='bn4b7_branch2c')) (self.feed('res4b6_relu', 'bn4b7_branch2c') .add(name='res4b7') .relu(name='res4b7_relu') .conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res4b8_branch2a') .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4b8_branch2a') .atrous_conv(3, 3, 256, 2, padding='SAME', biased=False, relu=False, name='res4b8_branch2b') .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4b8_branch2b') .conv(1, 1, 1024, 1, 1, biased=False, relu=False, name='res4b8_branch2c') .batch_normalization(is_training=is_training, activation_fn=None, name='bn4b8_branch2c')) (self.feed('res4b7_relu', 'bn4b8_branch2c') 
.add(name='res4b8') .relu(name='res4b8_relu') .conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res4b9_branch2a') .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4b9_branch2a') .atrous_conv(3, 3, 256, 2, padding='SAME', biased=False, relu=False, name='res4b9_branch2b') .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4b9_branch2b') .conv(1, 1, 1024, 1, 1, biased=False, relu=False, name='res4b9_branch2c') .batch_normalization(is_training=is_training, activation_fn=None, name='bn4b9_branch2c')) (self.feed('res4b8_relu', 'bn4b9_branch2c') .add(name='res4b9') .relu(name='res4b9_relu') .conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res4b10_branch2a') .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4b10_branch2a') .atrous_conv(3, 3, 256, 2, padding='SAME', biased=False, relu=False, name='res4b10_branch2b') .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4b10_branch2b') .conv(1, 1, 1024, 1, 1, biased=False, relu=False, name='res4b10_branch2c') .batch_normalization(is_training=is_training, activation_fn=None, name='bn4b10_branch2c')) (self.feed('res4b9_relu', 'bn4b10_branch2c') .add(name='res4b10') .relu(name='res4b10_relu') .conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res4b11_branch2a') .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4b11_branch2a') .atrous_conv(3, 3, 256, 2, padding='SAME', biased=False, relu=False, name='res4b11_branch2b') .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4b11_branch2b') .conv(1, 1, 1024, 1, 1, biased=False, relu=False, name='res4b11_branch2c') .batch_normalization(is_training=is_training, activation_fn=None, name='bn4b11_branch2c')) (self.feed('res4b10_relu', 'bn4b11_branch2c') .add(name='res4b11') .relu(name='res4b11_relu') .conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res4b12_branch2a') .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4b12_branch2a') .atrous_conv(3, 3, 256, 2, padding='SAME', biased=False, relu=False, name='res4b12_branch2b') .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4b12_branch2b') .conv(1, 1, 1024, 1, 1, biased=False, relu=False, name='res4b12_branch2c') .batch_normalization(is_training=is_training, activation_fn=None, name='bn4b12_branch2c')) (self.feed('res4b11_relu', 'bn4b12_branch2c') .add(name='res4b12') .relu(name='res4b12_relu') .conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res4b13_branch2a') .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4b13_branch2a') .atrous_conv(3, 3, 256, 2, padding='SAME', biased=False, relu=False, name='res4b13_branch2b') .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4b13_branch2b') .conv(1, 1, 1024, 1, 1, biased=False, relu=False, name='res4b13_branch2c') .batch_normalization(is_training=is_training, activation_fn=None, name='bn4b13_branch2c')) (self.feed('res4b12_relu', 'bn4b13_branch2c') .add(name='res4b13') .relu(name='res4b13_relu') .conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res4b14_branch2a') .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4b14_branch2a') .atrous_conv(3, 3, 256, 2, padding='SAME', biased=False, relu=False, name='res4b14_branch2b') .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4b14_branch2b') .conv(1, 1, 1024, 1, 1, biased=False, relu=False, 
name='res4b14_branch2c') .batch_normalization(is_training=is_training, activation_fn=None, name='bn4b14_branch2c')) (self.feed('res4b13_relu', 'bn4b14_branch2c') .add(name='res4b14') .relu(name='res4b14_relu') .conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res4b15_branch2a') .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4b15_branch2a') .atrous_conv(3, 3, 256, 2, padding='SAME', biased=False, relu=False, name='res4b15_branch2b') .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4b15_branch2b') .conv(1, 1, 1024, 1, 1, biased=False, relu=False, name='res4b15_branch2c') .batch_normalization(is_training=is_training, activation_fn=None, name='bn4b15_branch2c')) (self.feed('res4b14_relu', 'bn4b15_branch2c') .add(name='res4b15') .relu(name='res4b15_relu') .conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res4b16_branch2a') .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4b16_branch2a') .atrous_conv(3, 3, 256, 2, padding='SAME', biased=False, relu=False, name='res4b16_branch2b') .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4b16_branch2b') .conv(1, 1, 1024, 1, 1, biased=False, relu=False, name='res4b16_branch2c') .batch_normalization(is_training=is_training, activation_fn=None, name='bn4b16_branch2c')) (self.feed('res4b15_relu', 'bn4b16_branch2c') .add(name='res4b16') .relu(name='res4b16_relu') .conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res4b17_branch2a') .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4b17_branch2a') .atrous_conv(3, 3, 256, 2, padding='SAME', biased=False, relu=False, name='res4b17_branch2b') .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4b17_branch2b') .conv(1, 1, 1024, 1, 1, biased=False, relu=False, name='res4b17_branch2c') .batch_normalization(is_training=is_training, activation_fn=None, name='bn4b17_branch2c')) (self.feed('res4b16_relu', 'bn4b17_branch2c') .add(name='res4b17') .relu(name='res4b17_relu') .conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res4b18_branch2a') .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4b18_branch2a') .atrous_conv(3, 3, 256, 2, padding='SAME', biased=False, relu=False, name='res4b18_branch2b') .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4b18_branch2b') .conv(1, 1, 1024, 1, 1, biased=False, relu=False, name='res4b18_branch2c') .batch_normalization(is_training=is_training, activation_fn=None, name='bn4b18_branch2c')) (self.feed('res4b17_relu', 'bn4b18_branch2c') .add(name='res4b18') .relu(name='res4b18_relu') .conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res4b19_branch2a') .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4b19_branch2a') .atrous_conv(3, 3, 256, 2, padding='SAME', biased=False, relu=False, name='res4b19_branch2b') .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4b19_branch2b') .conv(1, 1, 1024, 1, 1, biased=False, relu=False, name='res4b19_branch2c') .batch_normalization(is_training=is_training, activation_fn=None, name='bn4b19_branch2c')) (self.feed('res4b18_relu', 'bn4b19_branch2c') .add(name='res4b19') .relu(name='res4b19_relu') .conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res4b20_branch2a') .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4b20_branch2a') .atrous_conv(3, 3, 256, 2, padding='SAME', biased=False, relu=False, 
name='res4b20_branch2b') .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4b20_branch2b') .conv(1, 1, 1024, 1, 1, biased=False, relu=False, name='res4b20_branch2c') .batch_normalization(is_training=is_training, activation_fn=None, name='bn4b20_branch2c')) (self.feed('res4b19_relu', 'bn4b20_branch2c') .add(name='res4b20') .relu(name='res4b20_relu') .conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res4b21_branch2a') .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4b21_branch2a') .atrous_conv(3, 3, 256, 2, padding='SAME', biased=False, relu=False, name='res4b21_branch2b') .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4b21_branch2b') .conv(1, 1, 1024, 1, 1, biased=False, relu=False, name='res4b21_branch2c') .batch_normalization(is_training=is_training, activation_fn=None, name='bn4b21_branch2c')) (self.feed('res4b20_relu', 'bn4b21_branch2c') .add(name='res4b21') .relu(name='res4b21_relu') .conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res4b22_branch2a') .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4b22_branch2a') .atrous_conv(3, 3, 256, 2, padding='SAME', biased=False, relu=False, name='res4b22_branch2b') .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4b22_branch2b') .conv(1, 1, 1024, 1, 1, biased=False, relu=False, name='res4b22_branch2c') .batch_normalization(is_training=is_training, activation_fn=None, name='bn4b22_branch2c')) (self.feed('res4b21_relu', 'bn4b22_branch2c') .add(name='res4b22') .relu(name='res4b22_relu') .conv(1, 1, 2048, 1, 1, biased=False, relu=False, name='res5a_branch1') .batch_normalization(is_training=is_training, activation_fn=None, name='bn5a_branch1')) (self.feed('res4b22_relu') .conv(1, 1, 512, 1, 1, biased=False, relu=False, name='res5a_branch2a') .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn5a_branch2a') .atrous_conv(3, 3, 512, 4, padding='SAME', biased=False, relu=False, name='res5a_branch2b') .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn5a_branch2b') .conv(1, 1, 2048, 1, 1, biased=False, relu=False, name='res5a_branch2c') .batch_normalization(is_training=is_training, activation_fn=None, name='bn5a_branch2c')) (self.feed('bn5a_branch1', 'bn5a_branch2c') .add(name='res5a') .relu(name='res5a_relu') .conv(1, 1, 512, 1, 1, biased=False, relu=False, name='res5b_branch2a') .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn5b_branch2a') .atrous_conv(3, 3, 512, 4, padding='SAME', biased=False, relu=False, name='res5b_branch2b') .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn5b_branch2b') .conv(1, 1, 2048, 1, 1, biased=False, relu=False, name='res5b_branch2c') .batch_normalization(is_training=is_training, activation_fn=None, name='bn5b_branch2c')) (self.feed('res5a_relu', 'bn5b_branch2c') .add(name='res5b') .relu(name='res5b_relu') .conv(1, 1, 512, 1, 1, biased=False, relu=False, name='res5c_branch2a') .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn5c_branch2a') .atrous_conv(3, 3, 512, 4, padding='SAME', biased=False, relu=False, name='res5c_branch2b') .batch_normalization(activation_fn=tf.nn.relu, name='bn5c_branch2b', is_training=is_training) .conv(1, 1, 2048, 1, 1, biased=False, relu=False, name='res5c_branch2c') .batch_normalization(is_training=is_training, activation_fn=None, name='bn5c_branch2c')) (self.feed('res5b_relu', 
'bn5c_branch2c') .add(name='res5c') .relu(name='res5c_relu') .atrous_conv(3, 3, num_classes, 6, padding='SAME', relu=False, name='fc1_voc12_c0')) (self.feed('res5c_relu') .atrous_conv(3, 3, num_classes, 12, padding='SAME', relu=False, name='fc1_voc12_c1')) (self.feed('res5c_relu') .atrous_conv(3, 3, num_classes, 18, padding='SAME', relu=False, name='fc1_voc12_c2')) (self.feed('res5c_relu') .atrous_conv(3, 3, num_classes, 24, padding='SAME', relu=False, name='fc1_voc12_c3')) (self.feed('fc1_voc12_c0', 'fc1_voc12_c1', 'fc1_voc12_c2', 'fc1_voc12_c3') .add(name='fc1_voc12'))
Network definition. Args: is_training: whether to update the running mean and variance of the batch normalisation layer. If the batch size is small, it is better to keep the running mean and variance of the pre-trained model frozen. num_classes: number of classes to predict (including background).
setup
python
iyah4888/SIGGRAPH18SSS
deeplab_resnet/model.py
https://github.com/iyah4888/SIGGRAPH18SSS/blob/master/deeplab_resnet/model.py
MIT
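The tail of this definition is the ASPP head of DeepLab v2: four parallel atrous 3x3 convolutions over res5c_relu at rates 6, 12, 18 and 24, summed element-wise into fc1_voc12. The chained .conv/.atrous_conv helpers belong to the kaffe Network base class, not core TensorFlow; a standalone sketch of the same fusion in plain TF1 ops:

import tensorflow as tf

def aspp_head(features, num_classes):
    # Four dilated 3x3 convolutions at increasing rates, then an element-wise sum.
    branches = []
    for rate in (6, 12, 18, 24):
        w = tf.get_variable('fc1_voc12_c%d_w' % rate,
                            shape=[3, 3, int(features.shape[-1]), num_classes])
        branches.append(tf.nn.atrous_conv2d(features, w, rate=rate, padding='SAME'))
    return tf.add_n(branches)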
def decode_labels(mask, num_images=1, num_classes=21): """Decode batch of segmentation masks. Args: mask: result of inference after taking argmax. num_images: number of images to decode from the batch. num_classes: number of classes to predict (including background). Returns: A batch with num_images RGB images of the same size as the input. """ n, h, w, c = mask.shape assert(n >= num_images), 'Batch size %d should be greater than or equal to the number of images to save (%d).' % (n, num_images) outputs = np.zeros((num_images, h, w, 3), dtype=np.uint8) for i in range(num_images): img = Image.new('RGB', (len(mask[i, 0]), len(mask[i]))) pixels = img.load() for j_, j in enumerate(mask[i, :, :, 0]): for k_, k in enumerate(j): if k < num_classes: pixels[k_, j_] = label_colours[k] outputs[i] = np.array(img) return outputs
Decode batch of segmentation masks. Args: mask: result of inference after taking argmax. num_images: number of images to decode from the batch. num_classes: number of classes to predict (including background). Returns: A batch with num_images RGB images of the same size as the input.
decode_labels
python
iyah4888/SIGGRAPH18SSS
deeplab_resnet/utils.py
https://github.com/iyah4888/SIGGRAPH18SSS/blob/master/deeplab_resnet/utils.py
MIT
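decode_labels reads a module-level label_colours palette (one RGB triple per class index). A toy sketch, assuming decode_labels and the palette share a namespace; the two-colour palette here is invented for illustration, while the real module defines the 21 PASCAL VOC colours:

import numpy as np

label_colours = [(0, 0, 0), (128, 0, 0)]                # hypothetical 2-class palette
mask = np.zeros((1, 4, 4, 1), dtype=np.uint8)           # batch of one 4x4 argmax map
mask[0, 1:3, 1:3, 0] = 1
rgb = decode_labels(mask, num_images=1, num_classes=2)  # -> (1, 4, 4, 3) uint8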
def prepare_label(input_batch, new_size, num_classes, one_hot=True): """Resize masks and perform one-hot encoding. Args: input_batch: input tensor of shape [batch_size H W 1]. new_size: a tensor with new height and width. num_classes: number of classes to predict (including background). one_hot: whether to perform one-hot encoding. Returns: Outputs a tensor of shape [batch_size h w num_classes] with last dimension comprised of 0's and 1's only. """ with tf.name_scope('label_encode'): input_batch = tf.image.resize_nearest_neighbor(input_batch, new_size) # as labels are integer numbers, need to use NN interp. input_batch = tf.squeeze(input_batch, squeeze_dims=[3]) # reducing the channel dimension. if one_hot: input_batch = tf.one_hot(input_batch, depth=num_classes) return input_batch
Resize masks and perform one-hot encoding. Args: input_batch: input tensor of shape [batch_size H W 1]. new_size: a tensor with new height and width. num_classes: number of classes to predict (including background). one_hot: whether to perform one-hot encoding. Returns: Outputs a tensor of shape [batch_size h w num_classes] with last dimension comprised of 0's and 1's only.
prepare_label
python
iyah4888/SIGGRAPH18SSS
deeplab_resnet/utils.py
https://github.com/iyah4888/SIGGRAPH18SSS/blob/master/deeplab_resnet/utils.py
MIT
def inv_preprocess(imgs, num_images, img_mean): """Inverse preprocessing of the batch of images. Add the mean vector and convert from BGR to RGB. Args: imgs: batch of input images. num_images: number of images to apply the inverse transformations on. img_mean: vector of mean colour values. Returns: A batch of size num_images with the same spatial dimensions as the input. """ n, h, w, c = imgs.shape assert(n >= num_images), 'Batch size %d should be greater than or equal to the number of images to save (%d).' % (n, num_images) outputs = np.zeros((num_images, h, w, c), dtype=np.uint8) for i in range(num_images): outputs[i] = (imgs[i] + img_mean)[:, :, ::-1].astype(np.uint8) return outputs
Inverse preprocessing of the batch of images. Add the mean vector and convert from BGR to RGB. Args: imgs: batch of input images. num_images: number of images to apply the inverse transformations on. img_mean: vector of mean colour values. Returns: A batch of size num_images with the same spatial dimensions as the input.
inv_preprocess
python
iyah4888/SIGGRAPH18SSS
deeplab_resnet/utils.py
https://github.com/iyah4888/SIGGRAPH18SSS/blob/master/deeplab_resnet/utils.py
MIT
def __init__(self, def_path, phase='test'): ''' def_path: Path to the model definition (.prototxt) phase: Either 'test' or 'train'. Used for filtering phase-specific nodes. ''' self.def_path = def_path self.phase = phase self.load()
def_path: Path to the model definition (.prototxt) phase: Either 'test' or 'train'. Used for filtering phase-specific nodes.
__init__
python
iyah4888/SIGGRAPH18SSS
kaffe/graph.py
https://github.com/iyah4888/SIGGRAPH18SSS/blob/master/kaffe/graph.py
MIT
def load(self): '''Load the layer definitions from the prototxt.''' self.params = get_caffe_resolver().NetParameter() with open(self.def_path, 'r') as def_file: # text_format.Merge expects a str, so read the prototxt as text rather than bytes. text_format.Merge(def_file.read(), self.params)
Load the layer definitions from the prototxt.
load
python
iyah4888/SIGGRAPH18SSS
kaffe/graph.py
https://github.com/iyah4888/SIGGRAPH18SSS/blob/master/kaffe/graph.py
MIT
def filter_layers(self, layers): '''Filter out layers based on the current phase.''' phase_map = {0: 'train', 1: 'test'} filtered_layer_names = set() filtered_layers = [] for layer in layers: phase = self.phase if len(layer.include): phase = phase_map[layer.include[0].phase] if len(layer.exclude): phase = phase_map[1 - layer.exclude[0].phase] exclude = (phase != self.phase) # Dropout layers appear in a fair number of Caffe # test-time networks. These are just ignored. We'll # filter them out here. if (not exclude) and (phase == 'test'): exclude = (layer.type == LayerType.Dropout) if not exclude: filtered_layers.append(layer) # Guard against dupes. assert layer.name not in filtered_layer_names filtered_layer_names.add(layer.name) return filtered_layers
Filter out layers based on the current phase.
filter_layers
python
iyah4888/SIGGRAPH18SSS
kaffe/graph.py
https://github.com/iyah4888/SIGGRAPH18SSS/blob/master/kaffe/graph.py
MIT
def make_node(self, layer): '''Create a graph node for the given layer.''' kind = NodeKind.map_raw_kind(layer.type) if kind is None: raise KaffeError('Unknown layer type encountered: %s' % layer.type) # We want to use the layer's top names (the "output" names), rather than the # name attribute, which is more of a readability thing than a functional one. # Other layers will refer to a node by its "top name". return Node(layer.name, kind, layer=layer)
Create a graph node for the given layer.
make_node
python
iyah4888/SIGGRAPH18SSS
kaffe/graph.py
https://github.com/iyah4888/SIGGRAPH18SSS/blob/master/kaffe/graph.py
MIT
def make_input_nodes(self): ''' Create data input nodes. This method is for old-style inputs, where the input specification was not treated as a first-class layer in the prototxt. Newer models use the "Input layer" type. ''' nodes = [Node(name, NodeKind.Data) for name in self.params.input] if len(nodes): input_dim = list(map(int, self.params.input_dim)) if not input_dim: if len(self.params.input_shape) > 0: input_dim = list(map(int, self.params.input_shape[0].dim)) else: raise KaffeError('Dimensions for input not specified.') for node in nodes: node.output_shape = tuple(input_dim) return nodes
Create data input nodes. This method is for old-style inputs, where the input specification was not treated as a first-class layer in the prototxt. Newer models use the "Input layer" type.
make_input_nodes
python
iyah4888/SIGGRAPH18SSS
kaffe/graph.py
https://github.com/iyah4888/SIGGRAPH18SSS/blob/master/kaffe/graph.py
MIT
def build(self): ''' Builds the graph from the Caffe layer definitions. ''' # Get the layers layers = self.params.layers or self.params.layer # Filter out phase-excluded layers layers = self.filter_layers(layers) # Get any separately-specified input layers nodes = self.make_input_nodes() nodes += [self.make_node(layer) for layer in layers] # Initialize the graph graph = Graph(nodes=nodes, name=self.params.name) # Connect the nodes # # A note on layers and outputs: # In Caffe, each layer can produce multiple outputs ("tops") from a set of inputs # ("bottoms"). The bottoms refer to other layers' tops. The top can rewrite a bottom # (in case of in-place operations). Note that the layer's name is not used for establishing # any connectivity. It's only used for data association. By convention, a layer with a # single top will often use the same name (although this is not required). # # The current implementation only supports single-output nodes (note that a node can still # have multiple children, since multiple child nodes can refer to the single top's name). node_outputs = {} for layer in layers: node = graph.get_node(layer.name) for input_name in layer.bottom: assert input_name != layer.name parent_node = node_outputs.get(input_name) if (parent_node is None) or (parent_node == node): parent_node = graph.get_node(input_name) node.add_parent(parent_node) if len(layer.top) > 1: raise KaffeError('Multiple top nodes are not supported.') for output_name in layer.top: if output_name == layer.name: # Output is named the same as the node. No further action required. continue # There are two possibilities here: # # Case 1: output_name refers to another node in the graph. # This is an "in-place operation" that overwrites an existing node. # This would create a cycle in the graph. We'll undo the in-placing # by substituting this node wherever the overwritten node is referenced. # # Case 2: output_name violates the convention layer.name == output_name. # Since we are working in the single-output regime, we can rename it to # match the layer name. # # For both cases, future references to this top re-route to this node. node_outputs[output_name] = node graph.compute_output_shapes() return graph
Builds the graph from the Caffe layer definitions.
build
python
iyah4888/SIGGRAPH18SSS
kaffe/graph.py
https://github.com/iyah4888/SIGGRAPH18SSS/blob/master/kaffe/graph.py
MIT
def load(self, data_path, session, ignore_missing=False): '''Load network weights. data_path: The path to the numpy-serialized network weights session: The current TensorFlow session ignore_missing: If true, serialized weights for missing layers are ignored. ''' data_dict = np.load(data_path, allow_pickle=True).item() # allow_pickle is required by NumPy >= 1.16.4 for object arrays for op_name in data_dict: with tf.variable_scope(op_name, reuse=True): for param_name, data in data_dict[op_name].items(): try: var = tf.get_variable(param_name) session.run(var.assign(data)) except ValueError: if not ignore_missing: raise
Load network weights. data_path: The path to the numpy-serialized network weights session: The current TensorFlow session ignore_missing: If true, serialized weights for missing layers are ignored.
load
python
iyah4888/SIGGRAPH18SSS
kaffe/tensorflow/network.py
https://github.com/iyah4888/SIGGRAPH18SSS/blob/master/kaffe/tensorflow/network.py
MIT
def feed(self, *args): '''Set the input(s) for the next operation by replacing the terminal nodes. The arguments can be either layer names or the actual layers. ''' assert len(args) != 0 self.terminals = [] for fed_layer in args: if isinstance(fed_layer, str): try: fed_layer = self.layers[fed_layer] except KeyError: raise KeyError('Unknown layer name fed: %s' % fed_layer) self.terminals.append(fed_layer) return self
Set the input(s) for the next operation by replacing the terminal nodes. The arguments can be either layer names or the actual layers.
feed
python
iyah4888/SIGGRAPH18SSS
kaffe/tensorflow/network.py
https://github.com/iyah4888/SIGGRAPH18SSS/blob/master/kaffe/tensorflow/network.py
MIT
def get_unique_name(self, prefix): '''Returns an index-suffixed unique name for the given prefix. This is used for auto-generating layer names based on the type-prefix. ''' ident = sum(t.startswith(prefix) for t, _ in list(self.layers.items())) + 1 return '%s_%d' % (prefix, ident)
Returns an index-suffixed unique name for the given prefix. This is used for auto-generating layer names based on the type-prefix.
get_unique_name
python
iyah4888/SIGGRAPH18SSS
kaffe/tensorflow/network.py
https://github.com/iyah4888/SIGGRAPH18SSS/blob/master/kaffe/tensorflow/network.py
MIT
def get_padding_type(kernel_params, input_shape, output_shape): '''Translates Caffe's numeric padding to one of ('SAME', 'VALID'). Caffe supports arbitrary padding values, while TensorFlow only supports 'SAME' and 'VALID' modes. So, not all Caffe paddings can be translated to TensorFlow. There are some subtleties to how the padding edge-cases are handled. These are described here: https://github.com/Yangqing/caffe2/blob/master/caffe2/proto/caffe2_legacy.proto ''' k_h, k_w, s_h, s_w, p_h, p_w = kernel_params s_o_h = np.ceil(input_shape.height / float(s_h)) s_o_w = np.ceil(input_shape.width / float(s_w)) if (output_shape.height == s_o_h) and (output_shape.width == s_o_w): return 'SAME' v_o_h = np.ceil((input_shape.height - k_h + 1.0) / float(s_h)) v_o_w = np.ceil((input_shape.width - k_w + 1.0) / float(s_w)) if (output_shape.height == v_o_h) and (output_shape.width == v_o_w): return 'VALID' return None
Translates Caffe's numeric padding to one of ('SAME', 'VALID'). Caffe supports arbitrary padding values, while TensorFlow only supports 'SAME' and 'VALID' modes. So, not all Caffe paddings can be translated to TensorFlow. There are some subtleties to how the padding edge-cases are handled. These are described here: https://github.com/Yangqing/caffe2/blob/master/caffe2/proto/caffe2_legacy.proto
get_padding_type
python
iyah4888/SIGGRAPH18SSS
kaffe/tensorflow/transformer.py
https://github.com/iyah4888/SIGGRAPH18SSS/blob/master/kaffe/tensorflow/transformer.py
MIT
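A worked example of the translation rule: with a 3x3 kernel at stride 1 on a 224x224 input, a 224x224 output matches the SAME formula ceil(224/1), a 222x222 output matches the VALID formula ceil((224-3+1)/1), and anything else yields None. A sketch using a stand-in shape type (the real caller passes kaffe shape objects with height/width attributes):

from collections import namedtuple

Shape = namedtuple('Shape', ['height', 'width'])
kernel_params = (3, 3, 1, 1, 1, 1)  # k_h, k_w, s_h, s_w, p_h, p_w
assert get_padding_type(kernel_params, Shape(224, 224), Shape(224, 224)) == 'SAME'
assert get_padding_type(kernel_params, Shape(224, 224), Shape(222, 222)) == 'VALID'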
def run( self, query="What is a lagrangian?", limit_broad_results=1_000, limit_deduped_url_results=50, limit_hierarchical_url_results=50, limit_final_pagerank_results=20, url_contains_filter=None, ): """Run a search query using the WebSearchEngine client""" query_vector = self.client.get_query_vector(query) broad_results = self.client.similarity_search( query_vector=query_vector, limit=limit_broad_results ) if not url_contains_filter: url_contains_filter = [] deduped_url_results = select_top_urls( broad_results, max_urls=limit_deduped_url_results, url_contains=url_contains_filter, ) hierarchical_url_results = ( self.client.hierarchical_similarity_reranking( query_vector=query_vector, urls=deduped_url_results, limit=limit_hierarchical_url_results, ) ) pagerank_reranked_results = self.client.pagerank_reranking( hierarchical_url_results )[:limit_final_pagerank_results] return pagerank_reranked_results
Run a search query using the WebSearchEngine client
run
python
SciPhi-AI/agent-search
agent_search/app/server.py
https://github.com/SciPhi-AI/agent-search/blob/master/agent_search/app/server.py
Apache-2.0
def to_string_dict(self) -> dict: """Returns a dictionary representation with all values as strings.""" return { "score": str(self.score), "url": self.url, "title": self.title, "dataset": self.dataset, "metadata": self.metadata, "text": self.text, }
Returns a dictionary representation with all values as strings.
to_string_dict
python
SciPhi-AI/agent-search
agent_search/core/search_types.py
https://github.com/SciPhi-AI/agent-search/blob/master/agent_search/core/search_types.py
Apache-2.0
def select_top_urls( ordered_points: List[AgentSearchResult], max_urls: int = 10, url_contains: Optional[List[str]] = None, ) -> List[str]: """Return the top unique URLs from the given ordered results.""" if not url_contains: url_contains = [] top_urls = set() for point in ordered_points: url = point.url if url in top_urls: continue url_contains_match = False if url_contains else True for url_contain in url_contains: if url_contain in url: url_contains_match = True break if not url_contains_match: continue top_urls.add(point.url) if len(top_urls) >= max_urls: break return list(top_urls)
Return the top unique URLs from the given ordered results.
select_top_urls
python
SciPhi-AI/agent-search
agent_search/core/utils.py
https://github.com/SciPhi-AI/agent-search/blob/master/agent_search/core/utils.py
Apache-2.0
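A small usage sketch; SimpleNamespace stands in for AgentSearchResult, since only the url attribute is consulted here. Note that because top_urls is a set, the returned list's order is not guaranteed:

from types import SimpleNamespace

points = [SimpleNamespace(url=u) for u in (
    'https://arxiv.org/abs/1706.03762',
    'https://arxiv.org/abs/1706.03762',                     # duplicate, dropped
    'https://en.wikipedia.org/wiki/Lagrangian_mechanics',   # filtered out below
)]
print(select_top_urls(points, max_urls=2, url_contains=['arxiv']))
# ['https://arxiv.org/abs/1706.03762']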
def cosine_similarity(v1: np.ndarray, v2: np.ndarray) -> float: """Compute the cosine similarity between two vectors.""" dot_product = np.dot(v1, v2) norm_v1 = np.linalg.norm(v1) norm_v2 = np.linalg.norm(v2) return dot_product / (norm_v1 * norm_v2)
Compute the cosine similarity between two vectors.
cosine_similarity
python
SciPhi-AI/agent-search
agent_search/core/utils.py
https://github.com/SciPhi-AI/agent-search/blob/master/agent_search/core/utils.py
Apache-2.0
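For intuition: orthogonal vectors score 0 and parallel ones score 1; note the function does not guard against zero-norm inputs, which divide by zero:

import numpy as np

print(cosine_similarity(np.array([1.0, 0.0]), np.array([0.0, 1.0])))  # 0.0
print(cosine_similarity(np.array([1.0, 2.0]), np.array([2.0, 4.0])))  # 1.0, up to float error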
def __init__( self, api_base: Optional[str] = None, api_key: Optional[str] = None, timeout: int = 30, ) -> None: """ Initializes the SciPhi client. Args: api_base (Optional[str]): Base URL for the SciPhi API. api_key (Optional[str]): API key for authenticating requests. timeout (int): Timeout for API requests in seconds. Raises: ValueError: If `api_key` is not provided. """ self.api_base = ( api_base or os.getenv("SCIPHI_API_BASE") or "https://api.sciphi.ai" ) self.api_key = api_key or os.getenv("SCIPHI_API_KEY") if not self.api_key: raise ValueError( "You must specify an explicit api_key or define `SCIPHI_API_KEY` to initialize a SciPhi client." ) self.timeout = timeout self.client = httpx.Client( base_url=self.api_base, headers=self._auth_headers(), timeout=timeout, )
Initializes the SciPhi client. Args: api_base (Optional[str]): Base URL for the SciPhi API. api_key (Optional[str]): API key for authenticating requests. timeout (int): Timeout for API requests in seconds. Raises: ValueError: If `api_key` is not provided.
__init__
python
SciPhi-AI/agent-search
agent_search/providers/sciphi.py
https://github.com/SciPhi-AI/agent-search/blob/master/agent_search/providers/sciphi.py
Apache-2.0
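A minimal construction sketch, assuming the enclosing class is named SciPhi as the docstring suggests; the key below is a placeholder:

import os

os.environ.setdefault('SCIPHI_API_KEY', 'sk-placeholder')  # hypothetical key for illustration
client = SciPhi(timeout=10)  # falls back to SCIPHI_API_KEY and the default api.sciphi.ai base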
def _handle_api_response(self, response: httpx.Response) -> Dict: """ Handles the HTTP response from the API. Args: response (httpx.Response): The response from the API request. Returns: Dict: JSON response content. Raises: Exception: If the response indicates an error. """ if response.is_error: # Handle errors appropriately raise Exception( f"API request failed with status {response.status_code}" ) result = response.json() return result
Handles the HTTP response from the API. Args: response (httpx.Response): The response from the API request. Returns: Dict: JSON response content. Raises: Exception: If the response indicates an error.
_handle_api_response
python
SciPhi-AI/agent-search
agent_search/providers/sciphi.py
https://github.com/SciPhi-AI/agent-search/blob/master/agent_search/providers/sciphi.py
Apache-2.0
def _handle_search_response(self, search_results: Dict[str, str]) -> None: """ Handles dictionary search responses from the API, coercing fields in place. Args: search_results (Dict[str, str]): The response from the API request. Returns: None: the results are modified in place. """ for result in search_results: if "score" in result: result["score"] = float(result["score"]) if "metadata" in result: try: result["metadata"] = ( json.loads(result["metadata"]) if ( result["metadata"] is not None and result["metadata"] != '""' ) else {} ) except Exception: result["metadata"] = {}
Handles dictionary search responses from the API, coercing fields in place. Args: search_results (Dict[str, str]): The response from the API request. Returns: None: the results are modified in place.
_handle_search_response
python
SciPhi-AI/agent-search
agent_search/providers/sciphi.py
https://github.com/SciPhi-AI/agent-search/blob/master/agent_search/providers/sciphi.py
Apache-2.0
def _retry_api_request( self, method: str, url: str, payload: Dict, max_retries: int = 3 ): """ Common method for retrying API requests with exponential backoff. Args: method (str): The HTTP method to use ('get' or 'post'). url (str): The API endpoint. payload (Dict): The payload for the request. max_retries (int): Maximum number of retry attempts. Returns: Dict: The JSON response from the API. Raises: Exception: If the maximum number of retries is reached. """ for attempt in range(max_retries): try: response = getattr(self.client, method)(url, json=payload) return self._handle_api_response(response) except httpx.HTTPError as e: logger.info(f"HTTP error on attempt {attempt + 1}: {e}") if attempt < max_retries - 1: time.sleep(0.5 * (2**attempt)) except Exception as e: logger.error(f"Error on attempt {attempt + 1}: {e}") if attempt < max_retries - 1: time.sleep(0.5 * (2**attempt)) raise Exception("Failed to fetch data after maximum retries.")
Common method for retrying API requests with exponential backoff. Args: method (str): The HTTP method to use ('get' or 'post'). url (str): The API endpoint. payload (Dict): The payload for the request. max_retries (int): Maximum number of retry attempts. Returns: Dict: The JSON response from the API. Raises: Exception: If the maximum number of retries is reached.
_retry_api_request
python
SciPhi-AI/agent-search
agent_search/providers/sciphi.py
https://github.com/SciPhi-AI/agent-search/blob/master/agent_search/providers/sciphi.py
Apache-2.0
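With max_retries=3, the backoff schedule above sleeps 0.5 * 2**attempt seconds between attempts: 0.5s after the first failure, 1s after the second, and none after the third before the final exception. The schedule in isolation:

max_retries = 3
delays = [0.5 * (2 ** attempt) for attempt in range(max_retries - 1)]
print(delays)  # [0.5, 1.0]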
def search( self, query: str, search_provider: str, max_retries: int = 3 ) -> List[Dict]: """ Performs a search query using the SciPhi API with retry and backoff logic. Args: query (str): The search query string. search_provider (str): The search provider to use. max_retries (int): Maximum number of retry attempts. Returns: List[Dict]: A list of search results, or an error dictionary on failure. """ url = "/search" payload = {"provider": search_provider, "query": query} try: handled_response = self._retry_api_request( "post", url, payload, max_retries ) self._handle_search_response(handled_response) return [SearchResult(**ele).dict() for ele in handled_response] except Exception as e: logger.error(f"Search request failed: {e}") return {"error": str(e)}
Performs a search query using the SciPhi API with retry and backoff logic. Args: query (str): The search query string. search_provider (str): The search provider to use. max_retries (int): Maximum number of retry attempts. Returns: List[Dict]: A list of search results, or an error dictionary on failure.
search
python
SciPhi-AI/agent-search
agent_search/providers/sciphi.py
https://github.com/SciPhi-AI/agent-search/blob/master/agent_search/providers/sciphi.py
Apache-2.0
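A hedged usage sketch; the client class name and import path are assumptions based on the package layout, so check the repo README before relying on them:

from agent_search import SciPhi  # assumed import path

client = SciPhi()
results = client.search(query="quantum computing", search_provider="agent-search")
for result in results[:3]:
    print(result["score"], result["url"])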
def get_search_rag_response(
        self,
        query: str,
        search_provider: str,
        llm_model: str = "SciPhi/Sensei-7B-V1",
        temperature: float = 0.2,
        top_p: float = 0.95,
    ):
        """
        Retrieves a search RAG (Retrieval-Augmented Generation) response from the API.

        Args:
            query (str): The search query string.
            search_provider (str): The search provider to use.
            llm_model (str): The language model to use.
            temperature (float): The temperature setting for the query.
            top_p (float): The top-p setting for the query.

        Returns:
            Dict: A dictionary with the search response and related queries.
        """
        if query == "":
            raise ValueError("Blank query submitted.")
        if search_provider not in ["bing", "agent-search"]:
            raise ValueError(f"Unsupported provider, {search_provider}")

        url = "/search_rag"
        payload = {
            "query": query,
            "search_provider": search_provider,
            "llm_model": llm_model,
            "temperature": temperature,
            "top_p": top_p,
        }

        try:
            handled_response = self._retry_api_request("post", url, payload)

            # rename the other queries to `related_queries` until LLM output is re-factored.
            handled_response["related_queries"] = handled_response.pop(
                "other_queries"
            )

            self._handle_search_response(handled_response["search_results"])

            # Use Pydantic model for parsing and validation
            search_response = SearchRAGResponse(**handled_response)
        except Exception as e:
            logger.error(f"Search request failed: {e}")
            return {"error": str(e)}

        return search_response.dict()
Retrieves a search RAG (Retrieval-Augmented Generation) response from the API. Args: query (str): The search query string. search_provider (str): The search provider to use. llm_model (str): The language model to use. temperature (float): The temperature setting for the query. top_p (float): The top-p setting for the query. Returns: Dict: A dictionary with the search response and related queries.
get_search_rag_response
python
SciPhi-AI/agent-search
agent_search/providers/sciphi.py
https://github.com/SciPhi-AI/agent-search/blob/master/agent_search/providers/sciphi.py
Apache-2.0
def completion(
        self,
        prompt: str,
        llm_model_name: str = "SciPhi/Sensei-7B-V1",
        llm_max_tokens_to_sample: int = 1_024,
        llm_temperature: float = 0.2,
        llm_top_p: float = 0.90,
    ) -> str:
        """
        Generates a completion for a given prompt using the SciPhi API.

        Args:
            prompt (str): The prompt for generating completion.
            llm_model_name (str): The language model to use.
            llm_max_tokens_to_sample (int): Maximum number of tokens for the sample.
            llm_temperature (float): The temperature setting for the query.
            llm_top_p (float): The top-p setting for the query.

        Returns:
            str: The generated completion, or a dict of the form
                {"error": ...} if generation fails.

        Raises:
            ImportError: If the `sciphi-synthesizer` package is not installed.
        """
        try:
            import synthesizer
        except ImportError:
            raise ImportError(
                "Please run `pip install sciphi-synthesizer` before attempting to generate a completion."
            )

        from synthesizer.core import LLMProviderName
        from synthesizer.interface import LLMInterfaceManager
        from synthesizer.llm import GenerationConfig

        try:
            llm_interface = LLMInterfaceManager.get_interface_from_args(
                LLMProviderName("sciphi"),
            )

            generation_config = GenerationConfig(
                model_name=llm_model_name,
                max_tokens_to_sample=llm_max_tokens_to_sample,
                temperature=llm_temperature,
                top_p=llm_top_p,
            )

            completion = llm_interface.get_completion(
                prompt, generation_config
            ).replace("</s>", "")

            return completion

        except Exception as e:
            logger.error(f"Completion generation failed: {e}")
            return {"error": str(e)}
Generates a completion for a given prompt using the SciPhi API. Args: prompt (str): The prompt for generating completion. llm_model_name (str): The language model to use. llm_max_tokens_to_sample (int): Maximum number of tokens for the sample. llm_temperature (float): The temperature setting for the query. llm_top_p (float): The top-p setting for the query. Returns: str: The generated completion, or a dict of the form {"error": ...} if generation fails. Raises: ImportError: If the `sciphi-synthesizer` package is not installed.
completion
python
SciPhi-AI/agent-search
agent_search/providers/sciphi.py
https://github.com/SciPhi-AI/agent-search/blob/master/agent_search/providers/sciphi.py
Apache-2.0
def process_rows(rows, output_queue): """Process the rows into qdrant point objects.""" qdrant_points = [] for row in rows: _, url, __, text_chunks, embeddings_binary, ___, ____ = row embeddings = np.frombuffer( embeddings_binary, dtype=np.float32 ).reshape(-1, EMBEDDING_VEC_SIZE) text_chunks = json.loads(text_chunks) # Prepare data for Qdrant qdrant_points.append( models.PointStruct( id=str(uuid.uuid3(uuid.NAMESPACE_DNS, url)), vector=[float(ele) for ele in embeddings[0]], payload={"text": text_chunks[0], "url": url}, ) ) output_queue.put(qdrant_points)
Process the rows into qdrant point objects.
process_rows
python
SciPhi-AI/agent-search
agent_search/scripts/populate_qdrant_from_postgres.py
https://github.com/SciPhi-AI/agent-search/blob/master/agent_search/scripts/populate_qdrant_from_postgres.py
Apache-2.0
def qdrant_writer(config, qdrant_queue, delete_existing):
    """A writer that listens for output events in a separate process."""
    qclient = QdrantClient(
        config["qdrant_host"],
        port=config["qdrant_grpc_port"],
        prefer_grpc=config["qdrant_prefer_grpc"],
    )
    if delete_existing:
        qclient.delete_collection(config["qdrant_collection_name"])
    create_collection(qclient, config["qdrant_collection_name"])

    logger.info("Launching Qdrant writer")
    while True:
        try:
            points = qdrant_queue.get()
            logger.info("Starting Qdrant write-out...")
            if points is None:  # Sentinel to end the process
                break
            operation_result = qclient.upsert(
                collection_name=config["qdrant_collection_name"],
                wait=True,
                points=points,
            )
            logger.info(
                f"Finished Qdrant write-out with result {operation_result}..."
            )
        except Exception as e:
            logger.error(f"Qdrant write-out failed with {e}")
A writer that listens for output events in a separate process.
qdrant_writer
python
SciPhi-AI/agent-search
agent_search/scripts/populate_qdrant_from_postgres.py
https://github.com/SciPhi-AI/agent-search/blob/master/agent_search/scripts/populate_qdrant_from_postgres.py
Apache-2.0
def process_batches(config, start, end, batch_size, output_queue): """Processes the batches in steps of the given batch_size""" # Connect to the database conn = psycopg2.connect( dbname=config["postgres_db"], user=config["postgres_user"], password=config["postgres_password"], host=config["postgres_host"], options="-c client_encoding=UTF8", ) cur = conn.cursor() # Declare a server-side cursor with offset cur.execute( f"DECLARE proc_cursor CURSOR FOR SELECT * FROM {config['postgres_table_name']} OFFSET {start} LIMIT {end - start}" ) offset = start while True: logger.info( f"Fetching a batch of size {batch_size} at offset {offset}" ) # Fetch a batch of rows cur.execute(f"FETCH {batch_size} FROM proc_cursor") rows = cur.fetchall() if len(rows) == 0: break process_rows(rows, output_queue) offset += batch_size # terminate if offset + batch_size >= end: break cur.close() conn.close()
Processes the batches in steps of the given batch_size
process_batches
python
SciPhi-AI/agent-search
agent_search/scripts/populate_qdrant_from_postgres.py
https://github.com/SciPhi-AI/agent-search/blob/master/agent_search/scripts/populate_qdrant_from_postgres.py
Apache-2.0
def run(self, num_processes=16, batch_size=1_024, delete_existing=False):
        """Runs the population process for the qdrant database"""
        qdrant_queue = multiprocessing.Queue()
        # The writer runs in its own process, draining points from the queue
        qdrant_writer_process = multiprocessing.Process(
            target=qdrant_writer,
            args=(
                self.config,
                qdrant_queue,
                delete_existing,
            ),
        )
        qdrant_writer_process.start()

        conn = psycopg2.connect(
            dbname=self.config["postgres_db"],
            user=self.config["postgres_user"],
            password=self.config["postgres_password"],
            host=self.config["postgres_host"],
            options="-c client_encoding=UTF8",
        )
        cur = conn.cursor()

        # Count total number of entries
        cur.execute(
            f"SELECT COUNT(*) FROM {self.config['postgres_table_name']}"
        )
        total_count = cur.fetchone()[0]
        logger.info(
            f"Processing {total_count} entries in {num_processes} processes"
        )
        range_size = total_count // num_processes

        # Create and start multiprocessing workflow
        processes = []
        for i in range(num_processes):
            logger.info(f"Starting process {i}...")
            start = i * range_size
            end = start + range_size if i < num_processes - 1 else total_count
            proc = multiprocessing.Process(
                target=process_batches,
                args=(
                    self.config,
                    start,
                    end,
                    batch_size,
                    qdrant_queue,
                ),
            )
            processes.append(proc)
            proc.start()

        # Wait for all processes to finish
        for proc in processes:
            proc.join()

        # Send the termination sentinel and wait for the writer to drain the queue
        qdrant_queue.put(None)
        qdrant_writer_process.join()

        cur.close()
        conn.close()
Runs the population process for the qdrant database
run
python
SciPhi-AI/agent-search
agent_search/scripts/populate_qdrant_from_postgres.py
https://github.com/SciPhi-AI/agent-search/blob/master/agent_search/scripts/populate_qdrant_from_postgres.py
Apache-2.0
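The orchestration above is a standard multiprocessing fan-in: N producers push onto one queue, a single writer drains it, and a None sentinel ends the writer. A minimal self-contained sketch of the same pattern:

import multiprocessing

def producer(q, items):
    for item in items:
        q.put(item)

def consumer(q):
    while True:
        item = q.get()
        if item is None:  # sentinel: no more work
            break
        print("consumed", item)

if __name__ == "__main__":
    q = multiprocessing.Queue()
    writer = multiprocessing.Process(target=consumer, args=(q,))
    writer.start()
    producers = [
        multiprocessing.Process(target=producer, args=(q, range(i * 3, (i + 1) * 3)))
        for i in range(2)
    ]
    for p in producers:
        p.start()
    for p in producers:
        p.join()
    q.put(None)  # all producers done; stop the writer
    writer.join()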
def hierarchical_similarity_reranking( self, query_vector: np.ndarray, urls: List[str], limit: int = 100, ) -> List[AgentSearchResult]: """Hierarchical URL search to find the most similar text chunk for the given query and URLs""" results = self.execute_batch_query(urls) # List to store the results along with their similarity scores similarity_results = [] # Iterate over each result to find the most similar text chunk for result in results: ( url, title, metadata, dataset, text_chunks_str, embeddings_binary, ) = result # deserialize the embeddings and text chunks embeddings = np.frombuffer( embeddings_binary, dtype=np.float32 ).reshape(-1, 768) text_chunks = json.loads(text_chunks_str) max_similarity = -1e9 most_similar_chunk = None # Iterate over each embedding to find the one with maximum cosine similarity for chunk, embedding in zip(text_chunks, embeddings): similarity = cosine_similarity( np.array(query_vector), np.array(embedding) ) if similarity > max_similarity: max_similarity = similarity most_similar_chunk = chunk # Store the most similar chunk and its similarity score similarity_results.append( AgentSearchResult( score=max_similarity, url=url, title=title, metadata=json.loads(metadata), dataset=dataset, text=most_similar_chunk, ), ) # Sort the results based on similarity score in descending order similarity_results.sort(key=lambda x: x.score, reverse=True) return similarity_results[:limit]
Hierarchical URL search to find the most similar text chunk for the given query and URLs
hierarchical_similarity_reranking
python
SciPhi-AI/agent-search
agent_search/search/base.py
https://github.com/SciPhi-AI/agent-search/blob/master/agent_search/search/base.py
Apache-2.0
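The cosine_similarity helper used above is not shown in this excerpt; a conventional NumPy definition consistent with how it is called (an assumption, not the repo's verbatim code):

import numpy as np

def cosine_similarity(a: np.ndarray, b: np.ndarray) -> float:
    # cos(theta) = (a . b) / (|a| * |b|)
    return float(np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b)))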
def pagerank_reranking(
        self,
        similarity_results: List[AgentSearchResult],
        limit: int = 100,
    ) -> List[AgentSearchResult]:
        """Reranks the results based on the PageRank score of the domain"""
        if not self.pagerank_rerank_module:
            raise Exception(
                "PageRank reranking module is not enabled. Please set pagerank_rerank_module=True while initializing the WebSearchEngine client."
            )
        # List to store the results along with their PageRank scores
        pagerank_results = []

        # Iterate over each result to find the PageRank score of the domain
        for result in similarity_results:
            pagerank_score = 0
            try:
                domain = result.url.split("/")[2]
                pagerank_score = self.domain_to_rank_map.get(domain, 0)
            except Exception as e:
                logger.info(f"Error {e} while extracting domain from URL: {result.url}")
            reweighted_score = (
                self.pagerank_importance * pagerank_score / 10.0
                + (1 - self.pagerank_importance) * result.score
            )
            pagerank_results.append(
                AgentSearchResult(
                    score=reweighted_score,
                    url=result.url,
                    title=result.title,
                    metadata=result.metadata,
                    dataset=result.dataset,
                    text=result.text,
                )
            )

        # Sort the results based on PageRank score in descending order
        pagerank_results.sort(key=lambda x: x.score, reverse=True)
        return pagerank_results[:limit]
Reranks the results based on the PageRank score of the domain
pagerank_reranking
python
SciPhi-AI/agent-search
agent_search/search/base.py
https://github.com/SciPhi-AI/agent-search/blob/master/agent_search/search/base.py
Apache-2.0
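A worked instance of the reweighting above, with invented values: for pagerank_importance = 0.25, a domain PageRank of 8, and a similarity score of 0.9, the blended score is 0.25 * (8 / 10.0) + 0.75 * 0.9 = 0.2 + 0.675 = 0.875.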
def scrub_str(string): """ The purpose of this function is to scrub the weird template mark-up out of strings that Veekun is using for their pokedex. Example: []{move:dragon-tail} will effect the opponents [HP]{mechanic:hp}. Becomes: dragon tail will effect the opponents HP. If you find this results in weird strings please take a stab at improving or re-writing. """ groups = re.findall(GROUP_RGX, string) for group in groups: if group[0]: sub = group[0] else: sub = group[1].split(":") if len(sub) >= 2: sub = sub[1] else: sub = sub[0] sub = sub.replace("-", " ") string = re.sub(SUB_RGX, sub, string, 1) return string
The purpose of this function is to scrub the weird template mark-up out of strings that Veekun is using for their pokedex. Example: []{move:dragon-tail} will effect the opponents [HP]{mechanic:hp}. Becomes: dragon tail will effect the opponents HP. If you find this results in weird strings please take a stab at improving or re-writing.
scrub_str
python
PokeAPI/pokeapi
data/v2/build.py
https://github.com/PokeAPI/pokeapi/blob/master/data/v2/build.py
BSD-3-Clause
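GROUP_RGX and SUB_RGX are module-level constants not shown in this excerpt; plausible reconstructions consistent with the function's logic (assumptions, not the repo's verbatim definitions) are:

import re

GROUP_RGX = r"\[(.*?)\]\{(.*?)\}"  # captures the "[label]{kind:target}" pairs
SUB_RGX = r"\[.*?\]\{.*?\}"        # same pattern, used as the substitution target

# With these definitions, the docstring's example behaves as described:
#   scrub_str("[]{move:dragon-tail} will effect the opponents [HP]{mechanic:hp}.")
#   -> "dragon tail will effect the opponents HP."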
def __SectionLength(this):
        """(4 bytes) Gets the length, in characters, of the given section"""
        offset = this.__SectionDataOffset
        return struct.unpack_from("<I", this.__data, offset)[0]
(4 bytes) Gets the length, in characters, of the given section
__SectionLength
python
PokeAPI/pokeapi
Resources/scripts/data/gen8/read_swsh.py
https://github.com/PokeAPI/pokeapi/blob/master/Resources/scripts/data/gen8/read_swsh.py
BSD-3-Clause
def __LineOffsets(this): """Figures out the offset for each entry based on the data section offset""" result = [None] * this.__LineCount sdo = int(this.__SectionDataOffset) for i in range(0, len(result)): result[i] = TextLine() result[i].offset = struct.unpack_from("<i", this.__data, (i * 8) + sdo + 4)[0] + sdo result[i].length = struct.unpack_from("<h", this.__data, (i * 8) + sdo + 8)[0] return result
Figures out the offset for each entry based on the data section offset
__LineOffsets
python
PokeAPI/pokeapi
Resources/scripts/data/gen8/read_swsh.py
https://github.com/PokeAPI/pokeapi/blob/master/Resources/scripts/data/gen8/read_swsh.py
BSD-3-Clause
def HashFNV1_64(this, word):
        """Fowler-Noll-Vo hash function; 64-bit"""
        fnvPrime_64 = 0x100000001b3
        offsetBasis_64 = 0xCBF29CE484222645
        hash = offsetBasis_64
        for c in word:
            # XOR before multiply is the FNV-1a variant of the algorithm
            hash = hash ^ ord(c)
            # Keep hash a 64-bit value
            hash = (hash * fnvPrime_64) % 2**64
        return hash
Fowler-Noll-Vo hash function; 64-bit
HashFNV1_64
python
PokeAPI/pokeapi
Resources/scripts/data/gen8/read_swsh.py
https://github.com/PokeAPI/pokeapi/blob/master/Resources/scripts/data/gen8/read_swsh.py
BSD-3-Clause
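Two checks that follow directly from the implementation above: hashing the empty string returns the offset basis (the loop body never runs), and every result is reduced modulo 2**64. Here `reader` is a hypothetical instance of the class that owns HashFNV1_64:

assert reader.HashFNV1_64("") == 0xCBF29CE484222645  # empty input -> offset basis
assert reader.HashFNV1_64("pikachu") < 2**64         # stays within 64 bits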
def __LineData(this, data):
        """Loads the file into a list to later decrypt"""
        key = copy.copy(this.__KEY_BASE)
        result = [None] * this.__LineCount
        lines = this.__LineOffsets
        for i in range(0, len(lines)):
            # Make a list twice the size of the current text line size
            encrypted = lines[i].length * 2
            # Then copy the encrypted line starting from the given offset for however long the given list is
            end = lines[i].offset + encrypted
            encrypted = this.__data[lines[i].offset:end]

            result[i] = this.__CryptLineData(encrypted, key)
            # Keep key a 16-bit value (otherwise things break)
            key = (key + this.__KEY_ADVANCE) % 2**16
        return result
Loads the file into a list to later decrypt
__LineData
python
PokeAPI/pokeapi
Resources/scripts/data/gen8/read_swsh.py
https://github.com/PokeAPI/pokeapi/blob/master/Resources/scripts/data/gen8/read_swsh.py
BSD-3-Clause
def __CryptLineData(this, data, key):
        """Decrypts the given line into a list of bytes"""
        copied = copy.copy(data)
        result = [None] * len(copied)
        for i in range(0, len(copied), 2):
            result[i] = copied[i] ^ (key % 256)
            result[i + 1] = copied[i + 1] ^ ((key >> 8) % 256)
            # Rotate key left by 3 within 16 bits (otherwise things break)
            key = (key << 3 | key >> 13) % 2**16
        return result
Decrypts the given line into a list of bytes
__CryptLineData
python
PokeAPI/pokeapi
Resources/scripts/data/gen8/read_swsh.py
https://github.com/PokeAPI/pokeapi/blob/master/Resources/scripts/data/gen8/read_swsh.py
BSD-3-Clause
def __GetLineString(this, data):
        """Turns the given list of bytes into a finished string"""
        if (data is None):
            return None

        string = ""
        i = 0
        while (i < len(data)):
            # Cast 2 bytes to figure out what to do next
            value = struct.unpack_from("<H", data, i)[0]
            i += 2
            if (value == this.__KEY_TERMINATOR):
                break
            elif (value == this.__KEY_VARIABLE):
                string += "[VAR]"
            # Compare against character codes; struct.unpack_from returns ints
            elif (value == ord("\n")):
                string += "\n"
            elif (value == ord("\\")):
                string += "\\"
            elif (value == ord("[")):
                string += "\\["
            else:
                string += chr(value)
        return string
Turns the given list of bytes into a finished string
__GetLineString
python
PokeAPI/pokeapi
Resources/scripts/data/gen8/read_swsh.py
https://github.com/PokeAPI/pokeapi/blob/master/Resources/scripts/data/gen8/read_swsh.py
BSD-3-Clause
def __MakeLabelHash(this, f):
        """Returns the label name and a FNV1_64 hash"""
        # Next 8 bytes is the hash of the label name
        hash = struct.unpack("<Q", f.read(8))[0]
        # Next 2 bytes is the label's name length
        nameLength = struct.unpack("<H", f.read(2))[0]
        # Read the bytes until 0x0 is found
        name = this.__ReadUntil(f, 0x0)

        if (this.HashFNV1_64(name) == hash):
            return name, hash
Returns the label name and a FNV1_64 hash
__MakeLabelHash
python
PokeAPI/pokeapi
Resources/scripts/data/gen8/read_swsh.py
https://github.com/PokeAPI/pokeapi/blob/master/Resources/scripts/data/gen8/read_swsh.py
BSD-3-Clause
def __ReadUntil(this, f, value): """Reads the given file until it reaches the given value""" string = "" c = f.read(1) end = bytes([value]) while (c != end): # Read one byte at a time to get each character string += c.decode("utf-8") c = f.read(1) return string
Reads the given file until it reaches the given value
__ReadUntil
python
PokeAPI/pokeapi
Resources/scripts/data/gen8/read_swsh.py
https://github.com/PokeAPI/pokeapi/blob/master/Resources/scripts/data/gen8/read_swsh.py
BSD-3-Clause
def call_phone_number(input: str) -> str: """calls a phone number as a bot and returns a transcript of the conversation. the input to this tool is a pipe separated list of a phone number, a prompt, and the first thing the bot should say. The prompt should instruct the bot with what to do on the call and be in the 3rd person, like 'the assistant is performing this task' instead of 'perform this task'. should only use this tool once it has found an adequate phone number to call. for example, `+15555555555|the assistant is explaining the meaning of life|i'm going to tell you the meaning of life` will call +15555555555, say 'i'm going to tell you the meaning of life', and instruct the assistant to tell the human what the meaning of life is. """ phone_number, prompt, initial_message = input.split("|", 2) call = OutboundCall( base_url=os.environ["TELEPHONY_SERVER_BASE_URL"], to_phone=phone_number, from_phone=os.environ["OUTBOUND_CALLER_NUMBER"], config_manager=RedisConfigManager(), agent_config=ChatGPTAgentConfig( prompt_preamble=prompt, initial_message=BaseMessage(text=initial_message), ), logger=logging.Logger("call_phone_number"), ) LOOP.run_until_complete(call.start()) while True: maybe_transcript = get_transcript(call.conversation_id) if maybe_transcript: delete_transcript(call.conversation_id) return maybe_transcript else: time.sleep(1)
calls a phone number as a bot and returns a transcript of the conversation. the input to this tool is a pipe separated list of a phone number, a prompt, and the first thing the bot should say. The prompt should instruct the bot with what to do on the call and be in the 3rd person, like 'the assistant is performing this task' instead of 'perform this task'. should only use this tool once it has found an adequate phone number to call. for example, `+15555555555|the assistant is explaining the meaning of life|i'm going to tell you the meaning of life` will call +15555555555, say 'i'm going to tell you the meaning of life', and instruct the assistant to tell the human what the meaning of life is.
call_phone_number
python
vocodedev/vocode-core
apps/langchain_agent/tools/vocode.py
https://github.com/vocodedev/vocode-core/blob/master/apps/langchain_agent/tools/vocode.py
MIT
async def respond( self, human_input: str, conversation_id: str, is_interrupt: bool = False, ) -> Tuple[Optional[str], bool]: """Generates a response from the SpellerAgent. The response is generated by joining each character in the human input with a space. The second element of the tuple indicates whether the agent should stop (False means it should not stop). Args: human_input (str): The input from the human user. conversation_id (str): The ID of the conversation. is_interrupt (bool): A flag indicating whether the agent was interrupted. Returns: Tuple[Optional[str], bool]: The generated response and a flag indicating whether to stop. """ return "".join(c + " " for c in human_input), False
Generates a response from the SpellerAgent. The response is generated by joining each character in the human input with a space. The second element of the tuple indicates whether the agent should stop (False means it should not stop). Args: human_input (str): The input from the human user. conversation_id (str): The ID of the conversation. is_interrupt (bool): A flag indicating whether the agent was interrupted. Returns: Tuple[Optional[str], bool]: The generated response and a flag indicating whether to stop.
respond
python
vocodedev/vocode-core
apps/telephony_app/speller_agent.py
https://github.com/vocodedev/vocode-core/blob/master/apps/telephony_app/speller_agent.py
MIT
def create_agent(self, agent_config: AgentConfig) -> BaseAgent: """Creates an agent based on the provided agent configuration. Args: agent_config (AgentConfig): The configuration for the agent to be created. Returns: BaseAgent: The created agent. Raises: Exception: If the agent configuration type is not recognized. """ # If the agent configuration type is CHAT_GPT, create a ChatGPTAgent. if isinstance(agent_config, ChatGPTAgentConfig): return ChatGPTAgent(agent_config=agent_config) # If the agent configuration type is agent_speller, create a SpellerAgent. elif isinstance(agent_config, SpellerAgentConfig): return SpellerAgent(agent_config=agent_config) # If the agent configuration type is not recognized, raise an exception. raise Exception("Invalid agent config")
Creates an agent based on the provided agent configuration. Args: agent_config (AgentConfig): The configuration for the agent to be created. Returns: BaseAgent: The created agent. Raises: Exception: If the agent configuration type is not recognized.
create_agent
python
vocodedev/vocode-core
apps/telephony_app/speller_agent.py
https://github.com/vocodedev/vocode-core/blob/master/apps/telephony_app/speller_agent.py
MIT
def get_metrics_data(self): """Reads and returns current metrics from the SDK""" with self._lock: self.collect() metrics_data = self._metrics_data self._metrics_data = None return metrics_data
Reads and returns current metrics from the SDK
get_metrics_data
python
vocodedev/vocode-core
playground/streaming/tracing_utils.py
https://github.com/vocodedev/vocode-core/blob/master/playground/streaming/tracing_utils.py
MIT
def default_env_vars() -> dict[str, str]: """ Defines default environment variables for the test session. This fixture provides a dictionary of default environment variables that are commonly used across tests. It can be overridden in submodule scoped `conftest.py` files or directly in tests. :return: A dictionary of default environment variables. """ return { "ENVIRONMENT": "test", "AZURE_OPENAI_API_BASE_EAST_US": "https://api.openai.com", "AZURE_OPENAI_API_KEY_EAST_US": "test", }
Defines default environment variables for the test session. This fixture provides a dictionary of default environment variables that are commonly used across tests. It can be overridden in submodule scoped `conftest.py` files or directly in tests. :return: A dictionary of default environment variables.
default_env_vars
python
vocodedev/vocode-core
tests/conftest.py
https://github.com/vocodedev/vocode-core/blob/master/tests/conftest.py
MIT
def mock_env( monkeypatch: MonkeyPatch, request: pytest.FixtureRequest, default_env_vars: dict[str, str] ) -> Generator[None, None, None]: """ Temporarily sets environment variables for testing. This fixture allows tests to run with a modified set of environment variables, either using the default set provided by `default_env_vars` or overridden by test-specific parameters. It ensures that changes to environment variables do not leak between tests. :param monkeypatch: The pytest monkeypatch fixture for modifying environment variables. :param request: The pytest FixtureRequest object for accessing test-specific overrides. :param default_env_vars: A dictionary of default environment variables. :yield: None. This is a setup-teardown fixture that cleans up after itself. """ envvars = default_env_vars.copy() if hasattr(request, "param") and isinstance(request.param, dict): envvars.update(request.param) with mock.patch.dict(os.environ, envvars): yield
Temporarily sets environment variables for testing. This fixture allows tests to run with a modified set of environment variables, either using the default set provided by `default_env_vars` or overridden by test-specific parameters. It ensures that changes to environment variables do not leak between tests. :param monkeypatch: The pytest monkeypatch fixture for modifying environment variables. :param request: The pytest FixtureRequest object for accessing test-specific overrides. :param default_env_vars: A dictionary of default environment variables. :yield: None. This is a setup-teardown fixture that cleans up after itself.
mock_env
python
vocodedev/vocode-core
tests/conftest.py
https://github.com/vocodedev/vocode-core/blob/master/tests/conftest.py
MIT
def default_env_vars(default_env_vars: dict[str, str]) -> dict[str, str]: """ Extends the `default_env_vars` fixture specifically for the submodule. This fixture takes the session-scoped `default_env_vars` fixture from the parent conftest.py and extends or overrides it with additional or modified environment variables specific to the submodule. :param default_env_vars: The inherited `default_env_vars` fixture from the parent conftest. :return: A modified dictionary of default environment variables for the submodule. """ submodule_env_vars = default_env_vars.copy() submodule_env_vars.update( { "VONAGE_API_KEY": "test", "VONAGE_API_SECRET": "test", "VONAGE_APPLICATION_ID": "test", "VONAGE_PRIVATE_KEY": """-----BEGIN PRIVATE KEY----- fake_key -----END PRIVATE KEY-----""", "BASE_URL": "test", "CALL_SERVER_BASE_URL": "test2", } ) return submodule_env_vars
Extends the `default_env_vars` fixture specifically for the submodule. This fixture takes the session-scoped `default_env_vars` fixture from the parent conftest.py and extends or overrides it with additional or modified environment variables specific to the submodule. :param default_env_vars: The inherited `default_env_vars` fixture from the parent conftest. :return: A modified dictionary of default environment variables for the submodule.
default_env_vars
python
vocodedev/vocode-core
tests/streaming/action/conftest.py
https://github.com/vocodedev/vocode-core/blob/master/tests/streaming/action/conftest.py
MIT
def action_config() -> dict: """Provides a common action configuration for tests.""" return { "processing_mode": "muted", "name": "name", "description": "A description", "url": "https://example.com", "input_schema": json.dumps(ACTION_INPUT_SCHEMA), "speak_on_send": True, "speak_on_receive": True, "signature_secret": base64.b64encode(os.urandom(32)).decode(), }
Provides a common action configuration for tests.
action_config
python
vocodedev/vocode-core
tests/streaming/action/test_external_actions.py
https://github.com/vocodedev/vocode-core/blob/master/tests/streaming/action/test_external_actions.py
MIT
def execute_action_setup(mocker, action_config) -> ExecuteExternalAction: """Common setup for creating an ExecuteExternalAction instance.""" action = ExecuteExternalAction( action_config=ExecuteExternalActionVocodeActionConfig(**action_config), ) mocked_requester = mocker.AsyncMock() mocked_requester.send_request.return_value = ExternalActionResponse( result={"test": "test"}, agent_message="message!", success=True, ) action.external_actions_requester = mocked_requester return action
Common setup for creating an ExecuteExternalAction instance.
execute_action_setup
python
vocodedev/vocode-core
tests/streaming/action/test_external_actions.py
https://github.com/vocodedev/vocode-core/blob/master/tests/streaming/action/test_external_actions.py
MIT
def _patched_serialize_record(text: str, record: dict) -> str:
    """
    Serializes a loguru record dict into a single JSON line.

    The record's logging fields ('level', 'time', 'elapsed', 'exception',
    'extra', 'file', 'function', 'line', 'message', 'module', 'name',
    'process', 'thread') are normalized into a 'serializable' dict; a
    non-None 'exception' is expanded into its 'type', 'value', and
    'traceback'. The dict is dumped with json.dumps using default=str (so
    non-serializable values degrade to strings) and ensure_ascii=False (so
    non-ASCII characters pass through unescaped), and a newline is appended.

    Args:
        text (str): The formatted log text.
        record (dict): A dictionary containing logging information.

    Returns:
        str: A serialized string representation of the record dictionary.
    """
    exception = record["exception"]

    if exception is not None:
        exception = {
            "type": None if exception.type is None else exception.type.__name__,
            "value": exception.value,
            "traceback": bool(exception.traceback),
        }

    serializable = {
        "severity": record["level"].name,
        "text": text,
        "timestamp": record["time"].timestamp(),
        "elapsed": {
            "repr": record["elapsed"],
            "seconds": record["elapsed"].total_seconds(),
        },
        "exception": exception,
        "ctx": get_serialized_ctx_wrappers(),
        "extra": record["extra"],
        "file": {"name": record["file"].name, "path": record["file"].path},
        "function": record["function"],
        "level": {
            "icon": record["level"].icon,
            "name": record["level"].name,
            "no": record["level"].no,
        },
        "line": record["line"],
        "message": record["message"],
        "module": record["module"],
        "name": record["name"],
        "process": {"id": record["process"].id, "name": record["process"].name},
        "thread": {"id": record["thread"].id, "name": record["thread"].name},
        "time": {"repr": record["time"], "timestamp": record["time"].timestamp()},
    }

    return json.dumps(serializable, default=str, ensure_ascii=False) + "\n"
Serializes a loguru record dict into a single JSON line. The record's logging fields ('level', 'time', 'elapsed', 'exception', 'extra', 'file', 'function', 'line', 'message', 'module', 'name', 'process', 'thread') are normalized into a 'serializable' dict; a non-None 'exception' is expanded into its 'type', 'value', and 'traceback'. The dict is dumped with json.dumps using default=str (so non-serializable values degrade to strings) and ensure_ascii=False (so non-ASCII characters pass through unescaped), and a newline is appended. Args: text (str): The formatted log text. record (dict): A dictionary containing logging information. Returns: str: A serialized string representation of the record dictionary.
_patched_serialize_record
python
vocodedev/vocode-core
vocode/logging.py
https://github.com/vocodedev/vocode-core/blob/master/vocode/logging.py
MIT
def emit(self, record: logging.LogRecord) -> None: # pragma: no cover """ Propagates logs to loguru. :param record: record to log. """ try: level: str | int = logger.level(record.levelname).name except ValueError: level = record.levelno # Find caller from where originated the logged message frame, depth = logging.currentframe(), 2 while ( frame.f_code.co_filename == logging.__file__ or frame.f_code.co_filename == __file__ or "sentry_sdk/integrations" in frame.f_code.co_filename ): frame = frame.f_back # type: ignore depth += 1 logger.opt(depth=depth, exception=record.exc_info).log( level, record.getMessage(), )
Propagates logs to loguru. :param record: record to log.
emit
python
vocodedev/vocode-core
vocode/logging.py
https://github.com/vocodedev/vocode-core/blob/master/vocode/logging.py
MIT
def configure_intercepter() -> None: """ Configures the logging system to intercept log messages. This function sets up an InterceptHandler instance as the main handler for the root logger. It sets the logging level to INFO, meaning that all messages with severity INFO and above will be handled. It then iterates over all the loggers in the logging system. If a logger's name starts with "uvicorn.", it removes all handlers from that logger. This is done to prevent uvicorn's default logging configuration from interfering with our custom configuration. Finally, it sets the InterceptHandler instance as the sole handler for the "uvicorn" and "uvicorn.access" loggers. This ensures that all log messages from uvicorn and its access logger are intercepted by our custom handler. """ intercept_handler = InterceptHandler() logging.basicConfig(handlers=[intercept_handler], level=logging.INFO) for logger_name in logging.root.manager.loggerDict: if logger_name.startswith("uvicorn."): logging.getLogger(logger_name).handlers = [] logging.getLogger("uvicorn").handlers = [intercept_handler] logging.getLogger("uvicorn.access").handlers = [intercept_handler]
Configures the logging system to intercept log messages. This function sets up an InterceptHandler instance as the main handler for the root logger. It sets the logging level to INFO, meaning that all messages with severity INFO and above will be handled. It then iterates over all the loggers in the logging system. If a logger's name starts with "uvicorn.", it removes all handlers from that logger. This is done to prevent uvicorn's default logging configuration from interfering with our custom configuration. Finally, it sets the InterceptHandler instance as the sole handler for the "uvicorn" and "uvicorn.access" loggers. This ensures that all log messages from uvicorn and its access logger are intercepted by our custom handler.
configure_intercepter
python
vocodedev/vocode-core
vocode/logging.py
https://github.com/vocodedev/vocode-core/blob/master/vocode/logging.py
MIT
def configure_pretty_logging() -> None: """ Configures the logging system to output pretty logs. This function enables the 'vocode' logger, sets up an intercept handler to capture logs from the standard logging module, removes all existing handlers from the 'loguru' logger, and adds a new handler that outputs to stdout with pretty formatting (colored, not serialized, no backtrace or diagnosis information). """ logger.enable("vocode") configure_intercepter() logger.remove() logger.add( sys.stdout, level=logging.DEBUG, backtrace=False, diagnose=False, serialize=False, colorize=True, )
Configures the logging system to output pretty logs. This function enables the 'vocode' logger, sets up an intercept handler to capture logs from the standard logging module, removes all existing handlers from the 'loguru' logger, and adds a new handler that outputs to stdout with pretty formatting (colored, not serialized, no backtrace or diagnosis information).
configure_pretty_logging
python
vocodedev/vocode-core
vocode/logging.py
https://github.com/vocodedev/vocode-core/blob/master/vocode/logging.py
MIT
def configure_json_logging() -> None: """ Configures the logging system to output logs in JSON format. This function enables the 'vocode' logger, sets up an intercept handler to capture logs from the standard logging module, removes all existing handlers from the 'loguru' logger, and adds a new handler that outputs to stdout with JSON formatting (serialized, no backtrace or diagnosis information). """ logger.enable("vocode") configure_intercepter() logger.remove() logger.add( sys.stdout, format="{message}", level=logging.DEBUG, backtrace=False, diagnose=False, serialize=True, )
Configures the logging system to output logs in JSON format. This function enables the 'vocode' logger, sets up an intercept handler to capture logs from the standard logging module, removes all existing handlers from the 'loguru' logger, and adds a new handler that outputs to stdout with JSON formatting (serialized, no backtrace or diagnosis information).
configure_json_logging
python
vocodedev/vocode-core
vocode/logging.py
https://github.com/vocodedev/vocode-core/blob/master/vocode/logging.py
MIT
async def check_for_idle(self):
        """Asks if human is still on the line if no activity is detected, and terminates the conversation if not."""
        await self.initial_message_tracker.wait()
        check_human_present_count = 0
        check_human_present_threshold = self.agent.get_agent_config().num_check_human_present_times
        while self.is_active():
            if check_human_present_count > 0 and self.is_human_still_there:
                # Reset the counter if the human is still there
                check_human_present_count = 0
            if (
                not self.check_for_idle_paused
            ) and time.time() - self.last_action_timestamp > self.idle_time_threshold:
                if check_human_present_count >= check_human_present_threshold:
                    # Stop the phone call after some retries to prevent infinitely long call where human is just silent.
                    await self.action_on_idle()
                self.is_human_still_there = False
                await self.send_single_message(
                    message=BaseMessage(text=random.choice(CHECK_HUMAN_PRESENT_MESSAGE_CHOICES)),
                )
                check_human_present_count += 1
            # wait till the idle time would have passed the threshold if no action occurs
            await asyncio.sleep(self.idle_time_threshold / 2)
Asks if human is still on the line if no activity is detected, and terminates the conversation if not.
check_for_idle
python
vocodedev/vocode-core
vocode/streaming/streaming_conversation.py
https://github.com/vocodedev/vocode-core/blob/master/vocode/streaming/streaming_conversation.py
MIT
async def broadcast_interrupt(self): """Stops all inflight events and cancels all workers that are sending output Returns true if any events were interrupted - which is used as a flag for the agent (is_interrupt) """ async with self.interrupt_lock: num_interrupts = 0 while True: try: interruptible_event = self.interruptible_events.get_nowait() if not interruptible_event.is_interrupted(): if interruptible_event.interrupt(): logger.debug( f"Interrupting event {type(interruptible_event.payload)} {interruptible_event.payload}", ) num_interrupts += 1 except queue.Empty: break self.output_device.interrupt() self.agent.cancel_current_task() self.agent_responses_worker.cancel_current_task() if self.actions_worker: self.actions_worker.cancel_current_task() return num_interrupts > 0
Stops all inflight events and cancels all workers that are sending output Returns true if any events were interrupted - which is used as a flag for the agent (is_interrupt)
broadcast_interrupt
python
vocodedev/vocode-core
vocode/streaming/streaming_conversation.py
https://github.com/vocodedev/vocode-core/blob/master/vocode/streaming/streaming_conversation.py
MIT
async def send_speech_to_output( self, message: str, synthesis_result: SynthesisResult, stop_event: threading.Event, seconds_per_chunk: float, transcript_message: Optional[Message] = None, started_event: Optional[threading.Event] = None, ): """ - Sends the speech chunk by chunk to the output device - update the transcript message as chunks come in (transcript_message is always provided for non filler audio utterances) - If the stop_event is set, the output is stopped - Sets started_event when the first chunk is sent Returns the message that was sent up to, and a flag if the message was cut off """ seconds_spoken = 0.0 def create_on_play_callback( chunk_idx: int, processed_event: asyncio.Event, ): def _on_play(): if chunk_idx == 0: if started_event: started_event.set() if first_chunk_span: self._track_first_chunk(first_chunk_span, synthesis_result) nonlocal seconds_spoken self.mark_last_action_timestamp() seconds_spoken += seconds_per_chunk if transcript_message: transcript_message.text = synthesis_result.get_message_up_to(seconds_spoken) processed_event.set() return _on_play def create_on_interrupt_callback( processed_event: asyncio.Event, ): def _on_interrupt(): processed_event.set() return _on_interrupt if self.transcriber.get_transcriber_config().mute_during_speech: logger.debug("Muting transcriber") self.transcriber.mute() logger.debug(f"Start sending speech {message} to output") first_chunk_span = self._maybe_create_first_chunk_span(synthesis_result, message) audio_chunks: List[AudioChunk] = [] processed_events: List[asyncio.Event] = [] interrupted_before_all_chunks_sent = False async for chunk_idx, chunk_result in enumerate_async_iter(synthesis_result.chunk_generator): if stop_event.is_set(): logger.debug("Interrupted before all chunks were sent") interrupted_before_all_chunks_sent = True break processed_event = asyncio.Event() audio_chunk = AudioChunk( data=chunk_result.chunk, ) # register callbacks setattr(audio_chunk, "on_play", create_on_play_callback(chunk_idx, processed_event)) setattr( audio_chunk, "on_interrupt", create_on_interrupt_callback(processed_event), ) # Prevents the case where we send a chunk after the output device has been interrupted async with self.interrupt_lock: self.output_device.consume_nonblocking( InterruptibleEvent( payload=audio_chunk, is_interruptible=True, interruption_event=stop_event, ), ) audio_chunks.append(audio_chunk) processed_events.append(processed_event) logger.debug("Finished sending chunks to the output device") if processed_events: await processed_events[-1].wait() maybe_first_interrupted_audio_chunk = next( ( audio_chunk for audio_chunk in audio_chunks if audio_chunk.state == ChunkState.INTERRUPTED ), None, ) cut_off = ( interrupted_before_all_chunks_sent or maybe_first_interrupted_audio_chunk is not None ) if ( transcript_message and not cut_off ): # if the audio was not cut off, we can set the transcript message to the full message transcript_message.text = synthesis_result.get_message_up_to(None) if self.transcriber.get_transcriber_config().mute_during_speech: logger.debug("Unmuting transcriber") self.transcriber.unmute() if transcript_message: transcript_message.is_final = not cut_off message_sent = transcript_message.text if transcript_message and cut_off else message if synthesis_result.synthesis_total_span: synthesis_result.synthesis_total_span.finish() return message_sent, cut_off
- Sends the speech chunk by chunk to the output device - update the transcript message as chunks come in (transcript_message is always provided for non filler audio utterances) - If the stop_event is set, the output is stopped - Sets started_event when the first chunk is sent Returns the message that was sent up to, and a flag if the message was cut off
send_speech_to_output
python
vocodedev/vocode-core
vocode/streaming/streaming_conversation.py
https://github.com/vocodedev/vocode-core/blob/master/vocode/streaming/streaming_conversation.py
MIT
async def _end_of_run_hook(self) -> None: """This method is called at the end of the run method. It is optional but intended to be overridden if needed.""" pass
This method is called at the end of the run method. It is optional but intended to be overridden if needed.
_end_of_run_hook
python
vocodedev/vocode-core
vocode/streaming/action/end_conversation.py
https://github.com/vocodedev/vocode-core/blob/master/vocode/streaming/action/end_conversation.py
MIT
def merge_event_logs(event_logs: List[EventLog]) -> List[EventLog]: """Returns a new list of event logs where consecutive bot messages are merged.""" new_event_logs: List[EventLog] = [] idx = 0 while idx < len(event_logs): bot_messages_buffer: List[Message] = [] current_log = event_logs[idx] while isinstance(current_log, Message) and current_log.sender == Sender.BOT: bot_messages_buffer.append(current_log) idx += 1 try: current_log = event_logs[idx] except IndexError: break if bot_messages_buffer: merged_bot_message = deepcopy(bot_messages_buffer[-1]) merged_bot_message.text = " ".join(event_log.text for event_log in bot_messages_buffer) new_event_logs.append(merged_bot_message) else: new_event_logs.append(current_log) idx += 1 return new_event_logs
Returns a new list of event logs where consecutive bot messages are merged.
merge_event_logs
python
vocodedev/vocode-core
vocode/streaming/agent/openai_utils.py
https://github.com/vocodedev/vocode-core/blob/master/vocode/streaming/agent/openai_utils.py
MIT
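A minimal sketch of the merging behavior; Message and Sender here are simplified stand-ins for vocode's real models (the real function's isinstance check means it must see the same Message class it was defined against):

from dataclasses import dataclass
from enum import Enum

class Sender(Enum):
    BOT = "bot"
    HUMAN = "human"

@dataclass
class Message:
    sender: Sender
    text: str

logs = [
    Message(Sender.BOT, "Hi,"),
    Message(Sender.BOT, "how can I help?"),
    Message(Sender.HUMAN, "What's the weather?"),
]
merged = merge_event_logs(logs)
# merged[0].text == "Hi, how can I help?"
# merged[1] is the untouched human message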
def split_sentences(text: str) -> List[str]:
    """Splits text into sentences, preserving trailing periods.

    Sentences that are just numbers are merged into the following sentence,
    since they are part of numbered lists.
    """
    initial_split = text.split(". ")
    final_split = []
    buffer = ""
    for i, sentence in enumerate(initial_split):
        is_last = i == len(initial_split) - 1
        buffer += sentence
        if not is_last:
            buffer += ". "
        if not re.fullmatch(r"\d+", sentence.strip()):
            final_split.append(buffer.strip())
            buffer = ""
    if buffer.strip():
        final_split.append(buffer.strip())
    return [sentence for sentence in final_split if sentence]
Splits text into sentences, preserving trailing periods. Sentences that are just numbers are merged into the following sentence, since they are part of numbered lists.
split_sentences
python
vocodedev/vocode-core
vocode/streaming/agent/streaming_utils.py
https://github.com/vocodedev/vocode-core/blob/master/vocode/streaming/agent/streaming_utils.py
MIT
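Traced examples of the behavior above, worked through the implementation by hand:

split_sentences("1. Eat. 2. Sleep.")
# -> ["1. Eat.", "2. Sleep."]   (the bare numbers are merged into their items)
split_sentences("Hello there. How are you?")
# -> ["Hello there.", "How are you?"]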
def num_tokens_from_messages(messages: List[dict], model: str = "gpt-3.5-turbo-0613"): """Return the number of tokens used by a list of messages.""" tokenizer_info = get_tokenizer_info(model) if tokenizer_info is None: raise NotImplementedError( f"""num_tokens_from_messages() is not implemented for model {model}. See https://github.com/openai/openai-python/blob/main/chatml.md for information on how messages are converted to tokens.""" ) num_tokens = 0 for message in messages: num_tokens += tokenizer_info.tokens_per_message num_tokens += tokens_from_dict( encoding=tokenizer_info.encoding, d=message, tokens_per_name=tokenizer_info.tokens_per_name, ) num_tokens += 3 # every reply is primed with <|start|>assistant<|message|> return num_tokens
Return the number of tokens used by a list of messages.
num_tokens_from_messages
python
vocodedev/vocode-core
vocode/streaming/agent/token_utils.py
https://github.com/vocodedev/vocode-core/blob/master/vocode/streaming/agent/token_utils.py
MIT
def tokens_from_dict(encoding: tiktoken.Encoding, d: Dict[str, Any], tokens_per_name: int) -> int: """Return the number of OpenAI tokens in a dict.""" num_tokens: int = 0 for key, value in d.items(): if value is None: continue if isinstance(value, str): num_tokens += len(encoding.encode(value)) if key == "name": num_tokens += tokens_per_name elif isinstance(value, dict): num_tokens += tokens_from_dict( encoding=encoding, d=value, tokens_per_name=tokens_per_name ) return num_tokens
Return the number of OpenAI tokens in a dict.
tokens_from_dict
python
vocodedev/vocode-core
vocode/streaming/agent/token_utils.py
https://github.com/vocodedev/vocode-core/blob/master/vocode/streaming/agent/token_utils.py
MIT
def num_tokens_from_functions(functions: List[dict] | None, model="gpt-3.5-turbo-0613") -> int: """Return the number of tokens used by a list of functions.""" if not functions: return 0 try: encoding = tiktoken.encoding_for_model(model) except KeyError: logger.warning("Warning: model not found. Using cl100k_base encoding.") encoding = tiktoken.get_encoding("cl100k_base") function_overhead = 3 + len(encoding.encode(_FUNCTION_OVERHEAD_STR)) return function_overhead + sum( len(encoding.encode(_format_func_into_prompt_str(func=f))) for f in functions )
Return the number of tokens used by a list of functions.
num_tokens_from_functions
python
vocodedev/vocode-core
vocode/streaming/agent/token_utils.py
https://github.com/vocodedev/vocode-core/blob/master/vocode/streaming/agent/token_utils.py
MIT
async def initialize_source(self, room: rtc.Room):
        """Creates the AudioSource that will be used to capture audio frames.

        Can only be called once the room has set up its track callbacks.
        """
        self.room = room

        source = rtc.AudioSource(self.sampling_rate, NUM_CHANNELS)
        track = rtc.LocalAudioTrack.create_audio_track("agent-synthesis", source)
        options = rtc.TrackPublishOptions()
        options.source = rtc.TrackSource.SOURCE_MICROPHONE
        await self.room.local_participant.publish_track(track, options)
        self.track = track
        self.source = source
Creates the AudioSource that will be used to capture audio frames. Can only be called once the room has set up its track callbacks.
initialize_source
python
vocodedev/vocode-core
vocode/streaming/output_device/livekit_output_device.py
https://github.com/vocodedev/vocode-core/blob/master/vocode/streaming/output_device/livekit_output_device.py
MIT
async def play(self, chunk: bytes): """Sends an audio chunk to immediate playback""" pass
Sends an audio chunk to immediate playback
play
python
vocodedev/vocode-core
vocode/streaming/output_device/rate_limit_interruptions_output_device.py
https://github.com/vocodedev/vocode-core/blob/master/vocode/streaming/output_device/rate_limit_interruptions_output_device.py
MIT
async def listen() -> None: """Listen to the websocket for audio data and stream it.""" first_message = True buffer = bytearray() while True: message = await ws.recv() if "audio" not in message: continue response = ElevenLabsWebsocketResponse.model_validate_json(message) if response.audio: decoded = base64.b64decode(response.audio) seconds = len(decoded) / ( self.sample_width * self.synthesizer_config.sampling_rate ) if self.upsample: decoded = self._resample_chunk( decoded, self.sample_rate, self.upsample, ) seconds = len(decoded) / (self.sample_width * self.sample_rate) if response.alignment: utterance_chunk = "".join(response.alignment.chars) + " " self.current_turn_utterances_by_chunk.append((utterance_chunk, seconds)) # For backchannels, send them all as one chunk (so it can't be interrupted) and reduce the volume # so that in the case of a false endpoint, the backchannel is not too loud. if first_message and backchannelled: buffer.extend(decoded) logger.info("First message was a backchannel, reducing volume.") reduced_amplitude_buffer = self.reduce_chunk_amplitude( buffer, factor=self.synthesizer_config.backchannel_amplitude_factor ) await self.voice_packet_queue.put(reduced_amplitude_buffer) buffer = bytearray() first_message = False else: buffer.extend(decoded) for chunk_idx in range(0, len(buffer) - chunk_size, chunk_size): await self.voice_packet_queue.put( buffer[chunk_idx : chunk_idx + chunk_size] ) buffer = buffer[len(buffer) - (len(buffer) % chunk_size) :] if response.isFinal: await self.voice_packet_queue.put(None) break
Listen to the websocket for audio data and stream it.
listen
python
vocodedev/vocode-core
vocode/streaming/synthesizer/eleven_labs_websocket_synthesizer.py
https://github.com/vocodedev/vocode-core/blob/master/vocode/streaming/synthesizer/eleven_labs_websocket_synthesizer.py
MIT
async def create_speech_uncached(
        self,
        message: BaseMessage,
        chunk_size: int,
        is_first_text_chunk: bool = False,
        is_sole_text_chunk: bool = False,
    ):
        """
        Run when parsing a full utterance, e.g. "Hello, my name is foo."
        """
        if not self.websocket_listener:
            self.websocket_listener = asyncio.create_task(
                self.establish_websocket_listeners(chunk_size)
            )
        if isinstance(message, BotBackchannel):
            if not message.text.endswith(" "):
                message.text += " "
            await self.text_chunk_queue.put(message)
            self.total_chars += len(message.text)
        else:
            async for text in string_chunker(message.text):
                await self.text_chunk_queue.put(LLMToken(text=text))
                self.total_chars += len(text)
        return self.get_current_utterance_synthesis_result()
Run when parsing a full utterance, e.g. "Hello, my name is foo."
create_speech_uncached
python
vocodedev/vocode-core
vocode/streaming/synthesizer/eleven_labs_websocket_synthesizer.py
https://github.com/vocodedev/vocode-core/blob/master/vocode/streaming/synthesizer/eleven_labs_websocket_synthesizer.py
MIT
async def send_token_to_synthesizer(self, message: LLMToken, chunk_size: int):
        """
        Run when parsing a single chunk of text, e.g. "Hello,"
        """
        self.total_chars += len(message.text)
        if not self.websocket_listener:
            self.websocket_listener = asyncio.create_task(
                self.establish_websocket_listeners(chunk_size)
            )
        await self.text_chunk_queue.put(message)
        return None
Run when parsing a single chunk of text, e.g. "Hello,"
send_token_to_synthesizer
python
vocodedev/vocode-core
vocode/streaming/synthesizer/eleven_labs_websocket_synthesizer.py
https://github.com/vocodedev/vocode-core/blob/master/vocode/streaming/synthesizer/eleven_labs_websocket_synthesizer.py
MIT
async def generate_chunks( play_ht_chunk: bytes, cut_leading_silence=False, ) -> AsyncGenerator[bytes, None]: """Yields chunks of size chunk_size from play_ht_chunk and leaves the remainder in buffer. If cut_leading_silence is True, does not yield chunks until it detects voice. """ nonlocal buffer buffer.extend(play_ht_chunk) detected_voice = False for buffer_idx, chunk in self._enumerate_by_chunk_size(buffer, chunk_size): if cut_leading_silence and not detected_voice: if self._contains_voice_experimental(chunk): detected_voice = True yield chunk if detected_voice: logger.debug(f"Cut off {buffer_idx} bytes of leading silence") else: yield chunk buffer = buffer[len(buffer) - (len(buffer) % chunk_size) :]
Yields chunks of size chunk_size from play_ht_chunk and leaves the remainder in buffer. If cut_leading_silence is True, does not yield chunks until it detects voice.
generate_chunks
python
vocodedev/vocode-core
vocode/streaming/synthesizer/play_ht_synthesizer_v2.py
https://github.com/vocodedev/vocode-core/blob/master/vocode/streaming/synthesizer/play_ht_synthesizer_v2.py
MIT
async def _cut_out_trailing_silence( trailing_chunk: bytes, ) -> AsyncGenerator[bytes, None]: """Yields chunks of size chunk_size from trailing_chunk until it detects silence.""" for buffer_idx, chunk in self._enumerate_by_chunk_size(trailing_chunk, chunk_size): if not self._contains_voice_experimental(chunk): logger.debug( f"Cutting off {len(trailing_chunk) - buffer_idx} bytes of trailing silence", ) break yield chunk
Yields chunks of size chunk_size from trailing_chunk until it detects silence.
_cut_out_trailing_silence
python
vocodedev/vocode-core
vocode/streaming/synthesizer/play_ht_synthesizer_v2.py
https://github.com/vocodedev/vocode-core/blob/master/vocode/streaming/synthesizer/play_ht_synthesizer_v2.py
MIT
def __init__( self, prefix: Optional[str] = None, suffix: Optional[str] = None, ) -> None: """ Initialize a RedisGenericMessageQueue instance. This initializes a Redis client and sets the name of the stream. """ self.redis: Redis = initialize_redis() queue_name_prefix = f"{prefix}_" if prefix else "" queue_name_suffix = f"_{suffix}" if suffix else "" self.queue_name = f"{queue_name_prefix}queue{queue_name_suffix}"
Initialize a RedisGenericMessageQueue instance. This initializes a Redis client and sets the name of the stream.
__init__
python
vocodedev/vocode-core
vocode/streaming/utils/redis.py
https://github.com/vocodedev/vocode-core/blob/master/vocode/streaming/utils/redis.py
MIT
async def publish(self, message: dict) -> None: """ Publishes a message to the Redis stream. Args: message (dict): The message to be published. Returns: None """ logger.info(f"[{self.queue_name}] Publishing message: {message}") try: await self.redis.xadd(self.queue_name, message) except Exception as e: logger.exception(f"[{self.queue_name}] Failed to publish message: {message}") raise e
Publishes a message to the Redis stream. Args: message (dict): The message to be published. Returns: None
publish
python
vocodedev/vocode-core
vocode/streaming/utils/redis.py
https://github.com/vocodedev/vocode-core/blob/master/vocode/streaming/utils/redis.py
MIT
async def process(self, item):
        """
        Publish results onto output queue.
        Calls to async functions / tasks should handle asyncio.CancelledError gracefully and not re-raise it.
        """
        raise NotImplementedError
Publish results onto output queue. Calls to async functions / tasks should handle asyncio.CancelledError gracefully and not re-raise it.
process
python
vocodedev/vocode-core
vocode/streaming/utils/worker.py
https://github.com/vocodedev/vocode-core/blob/master/vocode/streaming/utils/worker.py
MIT
def interrupt(self) -> bool:
    """
    Returns True if the event was interruptible and is now interrupted.
    """
    if not self.is_interruptible:
        return False
    self.interruption_event.set()
    return True
Returns True if the event was interruptible and is now interrupted.
interrupt
python
vocodedev/vocode-core
vocode/streaming/utils/worker.py
https://github.com/vocodedev/vocode-core/blob/master/vocode/streaming/utils/worker.py
MIT
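interrupt() and cancel_current_task() suggest the shape of the interruptible-event object they operate on. The following is a simplified sketch of such a wrapper, assuming an asyncio.Event-backed flag; the field names mirror the snippets, but the class itself is an assumption, not the project's actual implementation.

import asyncio
from typing import Generic, TypeVar

T = TypeVar("T")

class InterruptibleEvent(Generic[T]):
    """Payload wrapper carrying an interruption flag consumers can poll."""

    def __init__(self, payload: T, is_interruptible: bool = True):
        self.payload = payload
        self.is_interruptible = is_interruptible
        self.interruption_event = asyncio.Event()

    def interrupt(self) -> bool:
        # Same contract as the snippet: only interruptible events can be set.
        if not self.is_interruptible:
            return False
        self.interruption_event.set()
        return True

    def is_interrupted(self) -> bool:
        return self.interruption_event.is_set()

event = InterruptibleEvent(payload="utterance-1")
assert event.interrupt() and event.is_interrupted()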
async def process(self, item: InterruptibleEventType):
    """
    Publish results onto output queue.

    Calls to async function / task should be able to handle asyncio.CancelledError
    gracefully and not re-raise it.
    """
    raise NotImplementedError
Publish results onto output queue. Calls to async function / task should be able to handle asyncio.CancelledError gracefully and not re-raise it
process
python
vocodedev/vocode-core
vocode/streaming/utils/worker.py
https://github.com/vocodedev/vocode-core/blob/master/vocode/streaming/utils/worker.py
MIT
def cancel_current_task(self):
    """Free up the resources. That's useful so implementors do not have to implement this, but:
    - thread tasks won't be able to be interrupted; hopefully not too much of a big deal
      (threads will also get a reference to the interruptible event)
    - asyncio tasks will still have to handle CancelledError and clean up resources
    """
    if (
        self.current_task
        and not self.current_task.done()
        and self.interruptible_event.is_interruptible
    ):
        return self.current_task.cancel()
    return False
Free up the resources. That's useful so implementors do not have to implement this, but:
- thread tasks won't be able to be interrupted; hopefully not too much of a big deal
  (threads will also get a reference to the interruptible event)
- asyncio tasks will still have to handle CancelledError and clean up resources
cancel_current_task
python
vocodedev/vocode-core
vocode/streaming/utils/worker.py
https://github.com/vocodedev/vocode-core/blob/master/vocode/streaming/utils/worker.py
MIT
async def generate_from_async_iter_with_lookahead(
    async_iter: AsyncIterator[AsyncIteratorGenericType],
    lookahead: int,
) -> AsyncGenerator[List[AsyncIteratorGenericType], None]:
    """Yield sliding window lists of length `lookahead + 1` from an async iterator.

    If the length of async iterator < lookahead + 1, then it should just yield
    the whole async iterator as a list.
    """
    assert lookahead > 0
    buffer = []
    stream_length = 0
    while True:
        try:
            next_item = await async_iter.__anext__()
            stream_length += 1
            buffer.append(next_item)
            if len(buffer) == lookahead + 1:
                yield buffer
                buffer = buffer[1:]
        except StopAsyncIteration:
            if buffer and stream_length <= lookahead:
                yield buffer
            return
Yield sliding window lists of length `lookahead + 1` from an async iterator. If the length of async iterator < lookahead + 1, then it should just yield the whole async iterator as a list.
generate_from_async_iter_with_lookahead
python
vocodedev/vocode-core
vocode/streaming/utils/__init__.py
https://github.com/vocodedev/vocode-core/blob/master/vocode/streaming/utils/__init__.py
MIT
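A quick runnable demonstration of the sliding-window behavior (the function body is inlined as `windows` to keep the example self-contained):

import asyncio
from typing import AsyncGenerator, AsyncIterator, List, TypeVar

T = TypeVar("T")

async def windows(async_iter: AsyncIterator[T], lookahead: int) -> AsyncGenerator[List[T], None]:
    # Local copy of generate_from_async_iter_with_lookahead for demonstration.
    assert lookahead > 0
    buffer: List[T] = []
    stream_length = 0
    while True:
        try:
            buffer.append(await async_iter.__anext__())
            stream_length += 1
            if len(buffer) == lookahead + 1:
                yield buffer
                buffer = buffer[1:]
        except StopAsyncIteration:
            if buffer and stream_length <= lookahead:
                yield buffer
            return

async def main() -> None:
    async def gen():
        for x in range(4):
            yield x

    # Windows of length lookahead + 1 = 3 over the stream 0, 1, 2, 3.
    print([w async for w in windows(gen(), lookahead=2)])
    # -> [[0, 1, 2], [1, 2, 3]]

asyncio.run(main())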
async def add_texts(
    self,
    texts: Iterable[str],
    metadatas: Optional[List[dict]] = None,
    ids: Optional[List[str]] = None,
    namespace: Optional[str] = None,
) -> List[str]:
    """Run more texts through the embeddings and add to the vectorstore.

    Args:
        texts: Iterable of strings to add to the vectorstore.
        metadatas: Optional list of metadatas associated with the texts.
        ids: Optional list of ids to associate with the texts.
        namespace: Optional pinecone namespace to add the texts to.

    Returns:
        List of ids from adding the texts into the vectorstore.
    """
    # Adapted from: langchain/vectorstores/pinecone.py. Made langchain implementation async.
    if namespace is None:
        namespace = ""
    # Embed and create the documents
    docs = []
    ids = ids or [str(uuid.uuid4()) for _ in texts]
    for i, text in enumerate(texts):
        embedding = await self.create_openai_embedding(text)
        metadata = metadatas[i] if metadatas else {}
        metadata[self._text_key] = text
        docs.append({"id": ids[i], "values": embedding, "metadata": metadata})
    # upsert to Pinecone
    async with self.aiohttp_session.post(
        f"{self.pinecone_url}/vectors/upsert",
        headers={"Api-Key": self.pinecone_api_key},
        json={
            "vectors": docs,
            "namespace": namespace,
        },
    ) as response:
        response_json = await response.json()
        if "message" in response_json:
            logger.error(f"Error upserting vectors: {response_json}")
    return ids
Run more texts through the embeddings and add to the vectorstore.

Args:
    texts: Iterable of strings to add to the vectorstore.
    metadatas: Optional list of metadatas associated with the texts.
    ids: Optional list of ids to associate with the texts.
    namespace: Optional pinecone namespace to add the texts to.

Returns:
    List of ids from adding the texts into the vectorstore.
add_texts
python
vocodedev/vocode-core
vocode/streaming/vector_db/pinecone.py
https://github.com/vocodedev/vocode-core/blob/master/vocode/streaming/vector_db/pinecone.py
MIT
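A hypothetical caller for add_texts might look like this; the PineconeDB construction details (API key, index URL, aiohttp session) are omitted and the instance name is assumed:

import asyncio

async def index_documents(vector_db) -> None:
    # Upsert two small documents with per-document metadata.
    ids = await vector_db.add_texts(
        texts=["hello world", "goodbye world"],
        metadatas=[{"source": "greeting"}, {"source": "farewell"}],
        namespace="demo",
    )
    print(f"Upserted {len(ids)} vectors: {ids}")

# asyncio.run(index_documents(PineconeDB(...)))  # construction details omitted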
async def similarity_search_with_score(
    self,
    query: str,
    filter: Optional[dict] = None,
    namespace: Optional[str] = None,
) -> List[Tuple[Document, float]]:
    """Return pinecone documents most similar to query, along with scores.

    Args:
        query: Text to look up documents similar to.
        filter: Dictionary of argument(s) to filter on metadata
        namespace: Namespace to search in. Default will search in '' namespace.

    Returns:
        List of Documents most similar to the query and score for each
    """
    # Adapted from: langchain/vectorstores/pinecone.py. Made langchain implementation async.
    if namespace is None:
        namespace = ""
    query_obj = await self.create_openai_embedding(query)
    docs = []
    async with self.aiohttp_session.post(
        f"{self.pinecone_url}/query",
        headers={"Api-Key": self.pinecone_api_key},
        json={
            "top_k": self.config.top_k,
            "namespace": namespace,
            "filter": filter,
            "vector": query_obj,
            "includeMetadata": True,
        },
    ) as response:
        results = await response.json()
    for res in results["matches"]:
        metadata = res["metadata"]
        if self._text_key in metadata:
            text = metadata.pop(self._text_key)
            score = res["score"]
            docs.append((Document(page_content=text, metadata=metadata), score))
        else:
            logger.warning(f"Found document with no `{self._text_key}` key. Skipping.")
    return docs
Return pinecone documents most similar to query, along with scores.

Args:
    query: Text to look up documents similar to.
    filter: Dictionary of argument(s) to filter on metadata
    namespace: Namespace to search in. Default will search in '' namespace.

Returns:
    List of Documents most similar to the query and score for each
similarity_search_with_score
python
vocodedev/vocode-core
vocode/streaming/vector_db/pinecone.py
https://github.com/vocodedev/vocode-core/blob/master/vocode/streaming/vector_db/pinecone.py
MIT
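And a companion query sketch using similarity_search_with_score, filtering on a metadata field with Pinecone's metadata-filter syntax; the score threshold and instance are illustrative:

import asyncio

async def search(vector_db, query: str, min_score: float = 0.75):
    results = await vector_db.similarity_search_with_score(
        query,
        filter={"source": {"$eq": "greeting"}},  # Pinecone metadata filter syntax
        namespace="demo",
    )
    # Keep only sufficiently similar documents.
    return [(doc.page_content, score) for doc, score in results if score >= min_score]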
def __init__(self, func: Callable, *args: Tuple, **kwargs: Dict) -> None:
    """
    Constructs all the necessary attributes for the SentryConfiguredContextManager object.

    Args:
        func (Callable): The function to be executed.
        *args (Tuple): The positional arguments to pass to the function.
        **kwargs (Dict): The keyword arguments to pass to the function.
    """
    self.func = func
    self.args = args
    self.kwargs = kwargs
    self.result: Optional[Any] = None
Constructs all the necessary attributes for the SentryConfiguredContextManager object. Args: func (Callable): The function to be executed. *args (Tuple): The positional arguments to pass to the function. **kwargs (Dict): The keyword arguments to pass to the function.
__init__
python
vocodedev/vocode-core
vocode/utils/sentry_utils.py
https://github.com/vocodedev/vocode-core/blob/master/vocode/utils/sentry_utils.py
MIT
def is_configured(self) -> bool:
    """
    Checks if Sentry is configured.

    Returns:
        bool: True if Sentry is configured, False otherwise.
    """
    # Note: __enter__ and __call__ below access this as an attribute
    # (`self.is_configured`), so in the source it is presumably decorated
    # with @property; the decorator was likely stripped during extraction.
    client = sentry_sdk.Hub.current.client
    if client is not None and client.options is not None and "dsn" in client.options:
        return True
    return False
Checks if Sentry is configured.

Returns:
    bool: True if Sentry is configured, False otherwise.
is_configured
python
vocodedev/vocode-core
vocode/utils/sentry_utils.py
https://github.com/vocodedev/vocode-core/blob/master/vocode/utils/sentry_utils.py
MIT
def __enter__(self) -> Optional[Any]:
    """
    Executes the function if Sentry is configured.

    Returns:
        Any: The result of the function execution, or None if Sentry is not configured.
    """
    if self.is_configured:
        self.result = self.func(*self.args, **self.kwargs)
        return self.result
    else:
        return None
Executes the function if Sentry is configured.

Returns:
    Any: The result of the function execution, or None if Sentry is not configured.
__enter__
python
vocodedev/vocode-core
vocode/utils/sentry_utils.py
https://github.com/vocodedev/vocode-core/blob/master/vocode/utils/sentry_utils.py
MIT
def __call__(self) -> Optional[Any]:
    """
    Executes the function if Sentry is configured, and logs a debug message if it's not.

    Returns:
        Any: The result of the function execution, or None if Sentry is not configured.
    """
    if self.is_configured:
        return self.func(*self.args, **self.kwargs)
    else:
        logger.debug("Sentry is not configured, skipping function execution.")
        return None
Executes the function if Sentry is configured, and logs a debug message if it's not.

Returns:
    Any: The result of the function execution, or None if Sentry is not configured.
__call__
python
vocodedev/vocode-core
vocode/utils/sentry_utils.py
https://github.com/vocodedev/vocode-core/blob/master/vocode/utils/sentry_utils.py
MIT
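Putting the three Sentry methods together, usage looks roughly like this; the wrapped function is illustrative, and the context-manager form assumes the class also defines __exit__ (not shown in the excerpts above):

import sentry_sdk

def record_breadcrumb(message: str):
    # Illustrative wrapped function; any Sentry call could go here.
    sentry_sdk.add_breadcrumb(message=message)
    return message

# As a context manager: result is None when Sentry is unconfigured.
with SentryConfiguredContextManager(record_breadcrumb, "call started") as result:
    print(result)

# As a callable: same guard, plus a debug log when skipped.
SentryConfiguredContextManager(record_breadcrumb, "call ended")()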
def synthesizer_base_name_if_should_report_to_sentry(
    synthesizer: "BaseSynthesizer",
) -> Optional[str]:
    """Returns a synthesizer name if we should report metrics to Sentry for this
    kind of synthesizer; else returns None.
    """
    # As extracted, the one-liner always returned a string (yielding
    # "synthesizer.None" for unknown synthesizers), contradicting the
    # docstring; return None explicitly instead.
    synthesizer_name = _SYNTHESIZER_NAMES.get(synthesizer.__class__.__qualname__)
    return f"synthesizer.{synthesizer_name}" if synthesizer_name else None
Returns a synthesizer name if we should report metrics to Sentry for this kind of synthesizer; else returns None.
synthesizer_base_name_if_should_report_to_sentry
python
vocodedev/vocode-core
vocode/utils/sentry_utils.py
https://github.com/vocodedev/vocode-core/blob/master/vocode/utils/sentry_utils.py
MIT
def HandleRequest(req, method, post_data=None):
    """Sample dynamic HTTP response handler.

    Parameters
    ----------
    req : BaseHTTPServer.BaseHTTPRequestHandler
        The BaseHTTPRequestHandler that received the request
    method: str
        The HTTP method, either 'HEAD', 'GET', or 'POST' as of this writing
    post_data: str
        The HTTP post data received by calling `rfile.read()` against the
        BaseHTTPRequestHandler that received the request.
    """
    response = b'Ahoy\r\n'

    if method == 'GET':
        req.send_response(200)
        req.send_header('Content-Length', len(response))
        req.end_headers()
        req.wfile.write(response)
    elif method == 'POST':
        req.send_response(200)
        req.send_header('Content-Length', len(response))
        req.end_headers()
        req.wfile.write(response)
    elif method == 'HEAD':
        req.send_response(200)
        req.end_headers()
Sample dynamic HTTP response handler.

Parameters
----------
req : BaseHTTPServer.BaseHTTPRequestHandler
    The BaseHTTPRequestHandler that received the request
method: str
    The HTTP method, either 'HEAD', 'GET', or 'POST' as of this writing
post_data: str
    The HTTP post data received by calling `rfile.read()` against the
    BaseHTTPRequestHandler that received the request.
HandleRequest
python
mandiant/flare-fakenet-ng
fakenet/configs/CustomProviderExample.py
https://github.com/mandiant/flare-fakenet-ng/blob/master/fakenet/configs/CustomProviderExample.py
Apache-2.0
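HandleRequest can be driven from Python's standard http.server for quick testing; FakeNet-ng's actual listener wiring differs, so treat this harness as an illustration only:

from http.server import BaseHTTPRequestHandler, HTTPServer

class DynamicHandler(BaseHTTPRequestHandler):
    # Delegate each supported method to the dynamic handler above.
    def do_GET(self):
        HandleRequest(self, 'GET')

    def do_HEAD(self):
        HandleRequest(self, 'HEAD')

    def do_POST(self):
        length = int(self.headers.get('Content-Length', 0))
        HandleRequest(self, 'POST', self.rfile.read(length))

# HTTPServer(('127.0.0.1', 8080), DynamicHandler).serve_forever()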
def HandleTcp(sock):
    """Handle a TCP buffer.

    Parameters
    ----------
    sock : socket
        The connected socket with which to recv and send data
    """
    while True:
        try:
            data = None
            data = sock.recv(1024)
        except socket.timeout:
            pass

        if not data:
            break

        resp = input('\nEnter a response for the TCP client: ')
        sock.sendall(resp.encode())
Handle a TCP buffer.

Parameters
----------
sock : socket
    The connected socket with which to recv and send data
HandleTcp
python
mandiant/flare-fakenet-ng
fakenet/configs/CustomProviderExample.py
https://github.com/mandiant/flare-fakenet-ng/blob/master/fakenet/configs/CustomProviderExample.py
Apache-2.0
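A minimal interactive harness for HandleTcp might accept a single connection and hand the socket over; host, port, and timeout are illustrative (the timeout matters because the handler expects recv() to raise socket.timeout rather than block forever):

import socket

def serve_once(host: str = "127.0.0.1", port: int = 9001) -> None:
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as srv:
        srv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        srv.bind((host, port))
        srv.listen(1)
        conn, _addr = srv.accept()
        conn.settimeout(5.0)  # recv() raises socket.timeout instead of hanging
        with conn:
            HandleTcp(conn)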
def HandleUdp(sock, data, addr):
    """Handle a UDP buffer.

    Parameters
    ----------
    sock : socket
        The connected socket with which to recv and send data
    data : str
        The data received
    addr : tuple
        The host and port of the remote peer
    """
    if data:
        resp = input('\nEnter a response for the UDP client: ')
        sock.sendto(resp.encode(), addr)
Handle a UDP buffer.

Parameters
----------
sock : socket
    The connected socket with which to recv and send data
data : str
    The data received
addr : tuple
    The host and port of the remote peer
HandleUdp
python
mandiant/flare-fakenet-ng
fakenet/configs/CustomProviderExample.py
https://github.com/mandiant/flare-fakenet-ng/blob/master/fakenet/configs/CustomProviderExample.py
Apache-2.0
def first_packet_new_session(self):
    """Is this the first datagram from this conversation?

    Returns:
        True if this pair of endpoints hasn't conversed before, else False
    """
    # sessions.get returns (dst_ip, dport, pid, comm, dport0, proto) or
    # None. We just want dst_ip and dport for comparison.
    session = self.diverter.sessions.get(self.pkt.sport)
    if session is None:
        return True
    return not ((session.dst_ip, session.dport) ==
                (self.pkt.dst_ip, self.pkt.dport))
Is this the first datagram from this conversation?

Returns:
    True if this pair of endpoints hasn't conversed before, else False
first_packet_new_session
python
mandiant/flare-fakenet-ng
fakenet/diverters/diverterbase.py
https://github.com/mandiant/flare-fakenet-ng/blob/master/fakenet/diverters/diverterbase.py
Apache-2.0
def _validateBlackWhite(self):
    """Validate that only a black or a white list of either type (host or
    process) is configured.

    Side-effect:
        Raises ListenerBlackWhiteList if invalid
    """
    msg = None
    fmt = 'Cannot specify both %s blacklist and whitelist for port %d'

    if self.proc_wl and self.proc_bl:
        msg = fmt % ('process', self.port)
        self.proc_wl = self.proc_bl = None
    elif self.host_wl and self.host_bl:
        msg = fmt % ('host', self.port)
        self.host_wl = self.host_bl = None

    if msg:
        raise ListenerBlackWhiteList(msg)
Validate that only a black or a white list of either type (host or process) is configured. Side-effect: Raises ListenerBlackWhiteList if invalid
_validateBlackWhite
python
mandiant/flare-fakenet-ng
fakenet/diverters/diverterbase.py
https://github.com/mandiant/flare-fakenet-ng/blob/master/fakenet/diverters/diverterbase.py
Apache-2.0
def addListener(self, listener):
    """Add a ListenerMeta under the corresponding protocol and port."""
    proto = listener.proto
    port = listener.port

    if proto not in self.protos:
        self.protos[proto] = {}

    if port in self.protos[proto]:
        raise ListenerAlreadyBoundThere(
            'Listener already bound to %s port %s' % (proto, port))

    self.protos[proto][port] = listener
Add a ListenerMeta under the corresponding protocol and port.
addListener
python
mandiant/flare-fakenet-ng
fakenet/diverters/diverterbase.py
https://github.com/mandiant/flare-fakenet-ng/blob/master/fakenet/diverters/diverterbase.py
Apache-2.0
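addListener implements a small two-level registry (protocol, then port). The sketch below reimplements the same pattern in a self-contained form; ListenerMeta and the exception are simplified stand-ins for FakeNet-ng's real classes:

from collections import namedtuple

ListenerMeta = namedtuple('ListenerMeta', ['proto', 'port', 'hidden'])

class ListenerAlreadyBoundThere(Exception):
    pass

class PortRegistry:
    def __init__(self):
        self.protos = {}  # proto -> {port: ListenerMeta}

    def addListener(self, listener):
        port_map = self.protos.setdefault(listener.proto, {})
        if listener.port in port_map:
            raise ListenerAlreadyBoundThere(
                'Listener already bound to %s port %s' %
                (listener.proto, listener.port))
        port_map[listener.port] = listener

registry = PortRegistry()
registry.addListener(ListenerMeta('TCP', 80, hidden=False))
registry.addListener(ListenerMeta('TCP', 443, hidden=True))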
def isHidden(self, proto, port):
    """Is this port associated with a listener that is hidden?"""
    listener = self.getListenerMeta(proto, port)
    return listener.hidden if listener else False
Is this port associated with a listener that is hidden?
isHidden
python
mandiant/flare-fakenet-ng
fakenet/diverters/diverterbase.py
https://github.com/mandiant/flare-fakenet-ng/blob/master/fakenet/diverters/diverterbase.py
Apache-2.0